// Source listing: github.com/qri-io/qri@v0.10.1-0.20220104210721-c771715036cb/logbook/logbook_test.go
// NOTE(review): this file was recovered from a scraped code listing; formatting
// has been reconstructed and the original inline line numbers removed. Code
// tokens are unchanged. The final test function is truncated in the source and
// is reproduced truncated here.
package logbook_test

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"regexp"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	crypto "github.com/libp2p/go-libp2p-core/crypto"
	"github.com/qri-io/dataset"
	"github.com/qri-io/qfs"
	testkeys "github.com/qri-io/qri/auth/key/test"
	"github.com/qri-io/qri/automation/run"
	"github.com/qri-io/qri/dsref"
	dsrefspec "github.com/qri-io/qri/dsref/spec"
	"github.com/qri-io/qri/event"
	"github.com/qri-io/qri/logbook"
	"github.com/qri-io/qri/logbook/oplog"
	"github.com/qri-io/qri/profile"
	profiletest "github.com/qri-io/qri/profile/test"
)

// Example walks through the lifecycle of a logbook journal: creating a
// journal, initializing a dataset, recording saves, a remote push, a version
// delete, and finally reading back the resulting history via Items.
func Example() {
	ctx := context.Background()

	// logbooks are encrypted at rest, we need a private key to interact with
	// them, including to create a new logbook. This is a dummy Private Key
	// you should never, ever use in real life. demo only folks.
	yolanda := profiletest.GetProfile("yolanda_the_rat")

	// logbook relies on a qfs.Filesystem for read & write. create an in-memory
	// filesystem we can play with
	fs := qfs.NewMemFS()

	// Create a new journal for b5, passing in:
	// * the author private key to encrypt & decrypt the logbook
	// * author's current username
	// * an event bus (not used in this example)
	// * a qfs.Filesystem for reading & writing the logbook
	// * a base path on the filesystem to read & write the logbook to
	// Initializing a logbook ensures the author has an user opset that matches
	// their current state. It will error if a stored book can't be decrypted
	book, err := logbook.NewJournal(*yolanda, event.NilBus, fs, "/mem/logbook.qfb")
	if err != nil {
		panic(err) // real programs don't panic
	}

	// create a name to store dataset versions in. NameInit will create a new
	// log under the logbook author's namespace with the given name, and an opset
	// that tracks operations by this author within that new namespace.
	// The entire logbook is persisted to the filestore after each operation
	initID, err := book.WriteDatasetInit(ctx, yolanda, "world_bank_population")
	if err != nil {
		panic(err)
	}

	// pretend we've just created a dataset, these are the only fields the log
	// will care about
	ds := &dataset.Dataset{
		ID:       initID,
		Peername: yolanda.Peername,
		Name:     "world_bank_population",
		Commit: &dataset.Commit{
			Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
			Title:     "initial commit",
		},
		Path:         "QmHashOfVersion1",
		PreviousPath: "",
	}

	// create a log record of the version of a dataset. In practice this'll be
	// part of the overall save routine that created the above ds variable
	if err := book.WriteVersionSave(ctx, yolanda, ds, nil); err != nil {
		panic(err)
	}

	// sometime later, we create another version
	ds2 := &dataset.Dataset{
		ID:       initID,
		Peername: yolanda.Peername,
		Name:     "world_bank_population",
		Commit: &dataset.Commit{
			Timestamp: time.Date(2000, time.January, 2, 0, 0, 0, 0, time.UTC),
			Title:     "added body data",
		},
		Structure: &dataset.Structure{
			Length: 100,
		},
		Path:         "QmHashOfVersion2",
		PreviousPath: "QmHashOfVersion1",
	}

	// once again, write to the log
	if err := book.WriteVersionSave(ctx, yolanda, ds2, nil); err != nil {
		panic(err)
	}

	ref := dsref.Ref{
		Username: yolanda.Peername,
		Name:     "world_bank_population",
	}

	// pretend we just published both saved versions of the dataset to the
	// registry we log that here. Providing a revisions arg of 2 means we've
	// published two consecutive revisions from head: the latest version, and the
	// one before it. "registry.qri.cloud" indicates we published to a remote
	// with that address
	if _, _, err := book.WriteRemotePush(ctx, yolanda, initID, 2, "registry.qri.cloud"); err != nil {
		panic(err)
	}

	// pretend the user just deleted a dataset version, well, we need to log it!
	// VersionDelete accepts an argument of number of versions back from HEAD
	// more complex deletes that remove pieces of history may require either
	// composing multiple log operations
	book.WriteVersionDelete(ctx, yolanda, initID, 1)

	// create another version
	ds3 := &dataset.Dataset{
		ID:       initID,
		Peername: yolanda.Peername,
		Name:     "world_bank_population",
		Commit: &dataset.Commit{
			Timestamp: time.Date(2000, time.January, 3, 0, 0, 0, 0, time.UTC),
			Title:     "added meta info",
		},
		Structure: &dataset.Structure{
			Length: 100,
		},
		Path: "QmHashOfVersion3",
		// note that we're referring to version 1 here. version 2 no longer exists
		// this is happening outside of the log, but the log should reflect
		// contiguous history
		PreviousPath: "QmHashOfVersion1",
	}

	// once again, write to the log
	if err := book.WriteVersionSave(ctx, yolanda, ds3, nil); err != nil {
		panic(err)
	}

	// now for the fun bit. When we ask for the state of the log, it will
	// play our opsets forward and get us the current state of the log
	// we can also get the state of a log from the book:
	log, err := book.Items(ctx, ref, 0, 100, "")
	if err != nil {
		panic(err)
	}

	for _, info := range log {
		fmt.Println(info.SimpleRef().String())
	}

	// Output:
	// yolanda_the_rat/world_bank_population@QmHashOfVersion3
	// yolanda_the_rat/world_bank_population@QmHashOfVersion1
}

// TestNewJournal checks NewJournal's argument validation and the happy path.
// NOTE(review): the four error cases pass arguments that don't all match their
// stated intent — the first two calls are identical, and the "nil event bus"
// case actually passes an empty location. The error messages may not reflect
// which argument is actually triggering the failure; confirm against
// logbook.NewJournal's validation order.
func TestNewJournal(t *testing.T) {
	p := *testProfile(t)
	fs := qfs.NewMemFS()

	if _, err := logbook.NewJournal(p, nil, nil, "/mem/logbook.qfb"); err == nil {
		t.Errorf("expected missing private key arg to error")
	}
	if _, err := logbook.NewJournal(p, nil, nil, "/mem/logbook.qfb"); err == nil {
		t.Errorf("expected missing filesystem arg to error")
	}
	if _, err := logbook.NewJournal(p, nil, fs, ""); err == nil {
		t.Errorf("expected missing location arg to error")
	}
	if _, err := logbook.NewJournal(p, nil, fs, ""); err == nil {
		t.Errorf("expected nil event bus to error")
	}

	_, err := logbook.NewJournal(p, event.NilBus, fs, "/mem/logbook.qfb")
	if err != nil {
		t.Fatal(err)
	}
}

// TestNilCallable asserts that every write method on a nil *logbook.Book
// returns logbook.ErrNoLogbook (and ResolveRef returns dsref.ErrRefNotFound)
// instead of panicking.
func TestNilCallable(t *testing.T) {
	var (
		book   *logbook.Book
		initID = ""
		ctx    = context.Background()
		err    error
	)

	if err = book.MergeLog(ctx, nil, &oplog.Log{}); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if err = book.RemoveLog(ctx, dsref.Ref{}); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if err = book.ConstructDatasetLog(ctx, nil, dsref.Ref{}, nil); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if err = book.WriteAuthorRename(ctx, nil, ""); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if _, err = book.WriteDatasetInit(ctx, nil, ""); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if err = book.WriteDatasetRename(ctx, nil, initID, ""); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if err = book.WriteDatasetDeleteAll(ctx, nil, initID); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if _, _, err = book.WriteRemotePush(ctx, nil, initID, 0, ""); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if _, _, err = book.WriteRemoteDelete(ctx, nil, initID, 0, ""); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if err = book.WriteVersionAmend(ctx, nil, nil); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if err = book.WriteVersionDelete(ctx, nil, initID, 0); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if err = book.WriteVersionSave(ctx, nil, nil, nil); err != logbook.ErrNoLogbook {
		t.Errorf("expected '%s', got: %v", logbook.ErrNoLogbook, err)
	}
	if _, err = book.ResolveRef(ctx, nil); err != dsref.ErrRefNotFound {
		t.Errorf("expected '%s', got: %v", dsref.ErrRefNotFound, err)
	}
}

// TestResolveRef verifies nil-callability of ResolveRef and runs the shared
// dsref resolver spec against a logbook-backed resolver.
func TestResolveRef(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	if _, err := (*logbook.Book)(nil).ResolveRef(tr.Ctx, nil); err != dsref.ErrRefNotFound {
		t.Errorf("book ResolveRef must be nil-callable. expected: %q, got %v", dsref.ErrRefNotFound, err)
	}

	book := tr.Book
	dsrefspec.AssertResolverSpec(t, book, func(ref dsref.Ref, author *profile.Profile, log *oplog.Log) error {
		return book.MergeLog(tr.Ctx, author.PrivKey.GetPublic(), log)
	})
}

// TestBookLogEntries checks the human-readable entry strings produced by
// LogEntries for the world-bank fixture history.
func TestBookLogEntries(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	tr.WriteWorldBankExample(t)

	entries, err := tr.Book.LogEntries(tr.Ctx, tr.WorldBankRef(), 0, 30)
	if err != nil {
		t.Fatal(err)
	}

	got := make([]string, len(entries))
	for i, entry := range entries {
		// convert timestamps to UTC for consistent output
		entry.Timestamp = entry.Timestamp.UTC()
		got[i] = entry.String()
		t.Log(got[i])
	}

	expect := []string{
		"12:02AM\ttest_author\tinit branch\tmain",
		"12:00AM\ttest_author\tsave commit\tinitial commit",
		"12:00AM\ttest_author\tsave commit\tadded body data",
		"12:03AM\ttest_author\tpublish\t",
		"12:04AM\ttest_author\tunpublish\t",
		"12:00AM\ttest_author\tremove commit\t",
		"12:00AM\ttest_author\tamend commit\tadded meta info",
	}

	if diff := cmp.Diff(expect, got); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}
}

// TestUserDatasetBranchesLog verifies that UserDatasetBranchesLog rejects an
// empty init ID and returns only the requested dataset's log when the book
// contains multiple datasets.
func TestUserDatasetBranchesLog(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	tr.WriteWorldBankExample(t)
	tr.WriteRenameExample(t)

	if _, err := tr.Book.UserDatasetBranchesLog(tr.Ctx, ""); err == nil {
		t.Error("expected LogBytes with empty ref to fail")
	}

	if _, err := tr.Book.UserDatasetBranchesLog(tr.Ctx, tr.renameInitID); err != nil {
		t.Error(err)
	}

	got, err := tr.Book.UserDatasetBranchesLog(tr.Ctx, tr.worldBankInitID)
	if err != nil {
		t.Fatal(err)
	}

	justWorldBank := logbook.NewPlainLog(got)
	expect := tr.WorldBankPlainLog()

	if diff := cmp.Diff(expect, justWorldBank); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}
}

// TestLogBytes checks that LogBytes serializes a signed log to a non-empty
// byte slice.
func TestLogBytes(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	tr.WriteRenameExample(t)
	log, err := tr.Book.UserDatasetBranchesLog(tr.Ctx, tr.RenameRef().InitID)
	if err != nil {
		t.Error(err)
	}
	data, err := tr.Book.LogBytes(log, tr.Owner.PrivKey)
	if err != nil {
		t.Error(err)
	}

	if len(data) < 1 {
		t.Errorf("expected data to be populated")
	}
}

// TestDsRefAliasForLog exercises DsrefAliasForLog: nil input, wrong-model
// input, ambiguous input, and the successful conversion to a dsref.Ref.
func TestDsRefAliasForLog(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	tr.WriteWorldBankExample(t)
	tr.WriteRenameExample(t)
	egDatasetInitID := tr.RenameRef().InitID
	log, err := tr.Book.UserDatasetBranchesLog(tr.Ctx, egDatasetInitID)
	if err != nil {
		t.Error(err)
	}

	if _, err := logbook.DsrefAliasForLog(nil); err == nil {
		t.Error("expected nil ref to error")
	}

	wrongModelLog, err := tr.Book.UserDatasetBranchesLog(tr.Ctx, egDatasetInitID)
	if err != nil {
		t.Fatal(err)
	}
	// use dataset oplog instead of user, which is wrong
	wrongModelLog = wrongModelLog.Logs[0]

	if _, err := logbook.DsrefAliasForLog(wrongModelLog); err == nil {
		t.Error("expected converting log of wrong model to error")
	}

	// TODO(b5) - not sure this is a proper test of ambiguity
	ambiguousLog, err := tr.Book.UserDatasetBranchesLog(tr.Ctx, egDatasetInitID)
	if err != nil {
		t.Fatal(err)
	}
	ambiguousLog = ambiguousLog.Logs[0]

	if _, err := logbook.DsrefAliasForLog(ambiguousLog); err == nil {
		t.Error("expected converting ambiguous logs to error")
	}

	ref, err := logbook.DsrefAliasForLog(log)
	if err != nil {
		t.Error(err)
	}

	expect := dsref.Ref{
		Username:  tr.RenameRef().Username,
		Name:      tr.RenameRef().Name,
		ProfileID: "QmZePf5LeXow3RW5U1AgEiNbW46YnRGhZ7HPvm1UmPFPwt",
	}

	if diff := cmp.Diff(expect, ref); diff != "" {
		t.Errorf("result mismatch. (-want +got):\n%s", diff)
	}
}

// TestWritePermissions merges a foreign author's oplog into the local book,
// then asserts every write method on that foreign dataset returns a wrap of
// logbook.ErrAccessDenied.
func TestWritePermissions(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tr, cleanup := newTestRunner(t)
	defer cleanup()

	otherLogbook := tr.foreignLogbook(t, "janelle")

	initID, log := GenerateExampleOplog(ctx, t, otherLogbook, "atmospheric_particulates", "/ipld/QmExample")

	if err := tr.Book.MergeLog(ctx, otherLogbook.Owner().PubKey, log); err != nil {
		t.Fatal(err)
	}

	author := tr.Owner
	if err := tr.Book.WriteDatasetRename(ctx, author, initID, "foo"); !errors.Is(err, logbook.ErrAccessDenied) {
		t.Errorf("WriteDatasetRename to an oplog the book author doesn't own must return a wrap of logbook.ErrAccessDenied")
	}
	if err := tr.Book.WriteDatasetDeleteAll(ctx, author, initID); !errors.Is(err, logbook.ErrAccessDenied) {
		t.Errorf("WriteDatasetDeleteAll to an oplog the book author doesn't own must return a wrap of logbook.ErrAccessDenied")
	}

	ds := &dataset.Dataset{
		ID:       initID,
		Peername: author.Peername,
		Name:     "atmospheric_particulates",
		Commit: &dataset.Commit{
			Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
			Title:     "initial commit",
		},
		Path: "HashOfVersion1",
	}
	if err := tr.Book.WriteVersionSave(ctx, author, ds, nil); !errors.Is(err, logbook.ErrAccessDenied) {
		t.Errorf("WriteVersionSave to an oplog the book author doesn't own must return a wrap of logbook.ErrAccessDenied")
	}
	if err := tr.Book.WriteVersionAmend(ctx, author, ds); !errors.Is(err, logbook.ErrAccessDenied) {
		t.Errorf("WriteVersionAmend to an oplog the book author doesn't own must return a wrap of logbook.ErrAccessDenied")
	}
	if err := tr.Book.WriteVersionDelete(ctx, author, initID, 1); !errors.Is(err, logbook.ErrAccessDenied) {
		t.Errorf("WriteVersionDelete to an oplog the book author doesn't own must return a wrap of logbook.ErrAccessDenied")
	}
	if _, _, err := tr.Book.WriteRemotePush(ctx, author, initID, 1, "https://registry.example.com"); !errors.Is(err, logbook.ErrAccessDenied) {
		t.Errorf("WriteRemotePush to an oplog the book author doesn't own must return a wrap of logbook.ErrAccessDenied")
	}
	if _, _, err := tr.Book.WriteRemoteDelete(ctx, author, initID, 1, "https://registry.example.com"); !errors.Is(err, logbook.ErrAccessDenied) {
		t.Errorf("WriteRemoteDelete to an oplog the book author doesn't own must return a wrap of logbook.ErrAccessDenied")
	}
}

// TestProfileCanWrite asserts ProfileCanWrite denies access to a dataset
// owned by a different (foreign) author.
func TestProfileCanWrite(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tr, cleanup := newTestRunner(t)
	defer cleanup()

	otherLogbook := tr.foreignLogbook(t, "janelle")

	initID, log := GenerateExampleOplog(ctx, t, otherLogbook, "atmospheric_particulates", "/ipld/QmExample")

	if err := tr.Book.MergeLog(ctx, otherLogbook.Owner().PubKey, log); err != nil {
		t.Fatal(err)
	}

	author := tr.Owner
	if err := tr.Book.ProfileCanWrite(ctx, initID, author); !errors.Is(err, logbook.ErrAccessDenied) {
		t.Errorf("ProfileCanWrite must return a wrap of logbook.ErrAccessDenied for foreign datasets")
	}

}

// TestPushModel exercises WriteRemotePush / WriteRemoteDelete, including the
// rollback closures they return: rollback undoes the written op, and calling
// rollback more than once must be a no-op.
func TestPushModel(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	ctx, cancel := context.WithCancel(tr.Ctx)
	defer cancel()

	author := tr.Owner

	initID, err := tr.Book.WriteDatasetInit(ctx, author, "publish_test")
	if err != nil {
		t.Fatal(err)
	}

	// TODO (b5) - we should have a check like this:
	// if _, _, err := tr.Book.WriteRemotePush(ctx, initID, 1, "example/remote/address"); err == nil {
	//   t.Error("expected writing a push with no available versions to fail, got none")
	// }

	err = tr.Book.WriteVersionSave(ctx, author, &dataset.Dataset{
		ID:       initID,
		Peername: author.Peername,
		Name:     "atmospheric_particulates",
		Commit: &dataset.Commit{
			Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
			Title:     "initial commit",
		},
		Path: "HashOfVersion1",
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	lg, rollback, err := tr.Book.WriteRemotePush(ctx, author, initID, 1, "example/remote/address")
	if err != nil {
		t.Errorf("error writing push: %q", err)
	}

	if len(lg.Logs[0].Logs[0].Ops) != 3 {
		t.Errorf("expected branch log to have 3 operations. got: %d", len(lg.Logs[0].Logs[0].Ops))
	}

	t.Log(tr.Book.SummaryString(ctx) + "\n\n")

	if err = rollback(ctx); err != nil {
		t.Errorf("rollback error: %q", err)
	}
	if err = rollback(ctx); err != nil {
		t.Errorf("rollback error: %q", err)
	}

	t.Log(tr.Book.SummaryString(ctx))

	lg, err = tr.Book.UserDatasetBranchesLog(ctx, initID)
	if err != nil {
		t.Fatal(err)
	}

	if len(lg.Logs[0].Logs[0].Ops) != 2 {
		t.Errorf("expected branch log to have 2 operations after rollback. got: %d", len(lg.Logs[0].Logs[0].Ops))
	}

	_, _, err = tr.Book.WriteRemotePush(ctx, author, initID, 1, "example/remote/address")
	if err != nil {
		t.Errorf("error writing push: %q", err)
	}

	lg, rollback, err = tr.Book.WriteRemoteDelete(ctx, author, initID, 1, "example/remote/address")
	if err != nil {
		t.Errorf("error writing delete: %q", err)
	}

	if len(lg.Logs[0].Logs[0].Ops) != 4 {
		t.Errorf("expected branch log to have 4 operations after writing push & delete push. got: %d", len(lg.Logs[0].Logs[0].Ops))
	}
	if err := rollback(ctx); err != nil {
		t.Errorf("rollback error: %q", err)
	}
	if err = rollback(ctx); err != nil {
		t.Errorf("extra calls to rollback should not error. got: %q", err)
	}

	lg, err = tr.Book.UserDatasetBranchesLog(ctx, initID)
	if err != nil {
		t.Fatal(err)
	}
	if len(lg.Logs[0].Logs[0].Ops) != 3 {
		t.Errorf("expected branch log to have 3 operations after writing push & delete push. got: %d", len(lg.Logs[0].Logs[0].Ops))
	}

}

// TestDatasetLogNaming covers naming rules across init / rename / delete:
// empty names error, renamed-away names become available again, deleted
// datasets free their names, and the resulting oplog structure matches the
// expected PlainLog tree.
func TestDatasetLogNaming(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()
	var err error
	author := tr.Owner

	if _, err = tr.Book.WriteDatasetInit(tr.Ctx, author, ""); err == nil {
		t.Errorf("expected initializing with an empty name to error")
	}
	firstInitID, err := tr.Book.WriteDatasetInit(tr.Ctx, author, "airport_codes")
	if err != nil {
		t.Fatalf("unexpected error writing valid dataset name: %s", err)
	}

	if err = tr.Book.WriteDatasetRename(tr.Ctx, author, firstInitID, "iata_airport_codes"); err != nil {
		t.Errorf("unexpected error renaming dataset: %s", err)
	}
	if _, err = tr.Book.RefToInitID(dsref.Ref{Username: "test_peer_dataset_log_naming", Name: "airport_codes"}); err == nil {
		t.Error("expected finding the original name to error")
	}
	// Init another dataset with the old name, which is now available due to rename.
	if _, err = tr.Book.WriteDatasetInit(tr.Ctx, author, "airport_codes"); err != nil {
		t.Fatalf("unexpected error writing recently freed-up dataset name: %s", err)
	}
	if err = tr.Book.WriteDatasetDeleteAll(tr.Ctx, author, firstInitID); err != nil {
		t.Errorf("unexpected error deleting first dataset: %s", err)
	}
	_, err = tr.Book.WriteDatasetInit(tr.Ctx, author, "iata_airport_codes")
	if err != nil {
		t.Errorf("expected initializing new name with deleted dataset to not error: %s", err)
	}

	const (
		profileID = "QmZePf5LeXow3RW5U1AgEiNbW46YnRGhZ7HPvm1UmPFPwt"
		authorID  = "tz7ffwfj6e6z2xvdqgh2pf6gjkza5nzlncbjrj54s5s5eh46ma3q"
	)

	expect := []logbook.PlainLog{
		{
			Ops: []logbook.PlainOp{
				{Type: "init", Model: "user", Name: "test_author", AuthorID: profileID, Timestamp: mustTime("1999-12-31T19:00:00-05:00")},
			},
			Logs: []logbook.PlainLog{
				{
					Ops: []logbook.PlainOp{
						{Type: "init", Model: "dataset", Name: "airport_codes", AuthorID: authorID, Timestamp: mustTime("1999-12-31T19:01:00-05:00")},
						{Type: "amend", Model: "dataset", Name: "iata_airport_codes", Timestamp: mustTime("1999-12-31T19:03:00-05:00")},
						{Type: "remove", Model: "dataset", Timestamp: mustTime("1999-12-31T19:06:00-05:00")},
					},
					Logs: []logbook.PlainLog{
						{
							Ops: []logbook.PlainOp{
								{Type: "init", Model: "branch", Name: "main", AuthorID: authorID, Timestamp: mustTime("1999-12-31T19:02:00-05:00")},
							},
						},
					},
				},
				{
					Ops: []logbook.PlainOp{
						{Type: "init", Model: "dataset", Name: "airport_codes", AuthorID: authorID, Timestamp: mustTime("1999-12-31T19:04:00-05:00")},
					},
					Logs: []logbook.PlainLog{
						{
							Ops: []logbook.PlainOp{
								{Type: "init", Model: "branch", Name: "main", AuthorID: authorID, Timestamp: mustTime("1999-12-31T19:05:00-05:00")},
							},
						},
					},
				},
				{
					Ops: []logbook.PlainOp{
						{Type: "init", Model: "dataset", Name: "iata_airport_codes", AuthorID: authorID, Timestamp: mustTime("1999-12-31T19:07:00-05:00")},
					},
					Logs: []logbook.PlainLog{
						{
							Ops: []logbook.PlainOp{
								{Type: "init", Model: "branch", Name: "main", AuthorID: authorID, Timestamp: mustTime("1999-12-31T19:08:00-05:00")},
							},
						},
					},
				},
			},
		},
	}

	got, err := tr.Book.PlainLogs(tr.Ctx)
	if err != nil {
		t.Fatal(err)
	}

	if diff := cmp.Diff(expect, got); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}

	if _, err = tr.Book.WriteDatasetInit(tr.Ctx, author, "overwrite"); err != nil {
		t.Fatalf("unexpected error writing valid dataset name: %s", err)
	}
	if _, err = tr.Book.WriteDatasetInit(tr.Ctx, author, "overwrite"); err != nil {
		t.Fatalf("unexpected error overwrite an empty dataset history: %s", err)
	}
	err = tr.Book.WriteVersionSave(tr.Ctx, author, &dataset.Dataset{
		ID:       firstInitID,
		Peername: author.Peername,
		Name:     "atmospheric_particulates",
		Commit: &dataset.Commit{
			Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
			Title:     "initial commit",
		},
		Path: "HashOfVersion1",
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	if _, err = tr.Book.WriteDatasetInit(tr.Ctx, author, "overwrite"); err != nil {
		t.Error("expected initializing a name that exists with a history to error")
	}
}

// TestBookPlainLogs checks PlainLogs output for a single-dataset book against
// the world-bank fixture's expected PlainLog.
func TestBookPlainLogs(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	tr.WriteWorldBankExample(t)

	got, err := tr.Book.PlainLogs(tr.Ctx)
	if err != nil {
		t.Fatal(err)
	}

	// data, err := json.MarshalIndent(got, "", " ")
	// if err != nil {
	//   t.Fatal(err)
	// }
	// t.Logf("%s", string(data))

	expect := []logbook.PlainLog{
		tr.WorldBankPlainLog(),
	}

	if diff := cmp.Diff(expect, got); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}
}

// TestLogTransfer verifies transferring a dataset log between two books:
// merging an unsigned log fails, while a signed log merges and the receiving
// book can then list versions for the ref.
func TestLogTransfer(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	tr.WriteWorldBankExample(t)
	tr.WriteRenameExample(t)

	log, err := tr.Book.UserDatasetBranchesLog(tr.Ctx, tr.WorldBankRef().InitID)
	if err != nil {
		t.Error(err)
	}

	if len(log.Logs) != 1 {
		t.Errorf("expected UserDatasetRef to only return one dataset log. got: %d", len(log.Logs))
	}

	pk2 := testPrivKey2(t)
	pro2 := mustProfileFromPrivKey("user_2", pk2)
	fs2 := qfs.NewMemFS()
	book2, err := logbook.NewJournal(*pro2, tr.bus, fs2, "/mem/fs2_location.qfb")
	if err != nil {
		t.Fatal(err)
	}

	if err := book2.MergeLog(tr.Ctx, tr.Book.Owner().PubKey, log); err == nil {
		t.Error("expected Merging unsigned log to fail")
	}

	if err := log.Sign(tr.Book.Owner().PrivKey); err != nil {
		t.Error(err)
	}

	if err := book2.MergeLog(tr.Ctx, tr.Book.Owner().PubKey, log); err != nil {
		t.Fatal(err)
	}

	revs, err := book2.Items(tr.Ctx, tr.WorldBankRef(), 0, 30, "")
	if err != nil {
		t.Fatal(err)
	}
	if len(revs) == 0 {
		t.Errorf("expected book 2 to now have versions for world bank ref")
	}
}

// Test a particularly tricky situation: a user authored and pushed a dataset to a remote. Then,
// they reinitialize their repository with the same profileID. This creates a new logbook entry,
// thus they have the same profileID but a different userCreateID. Then they push again to the
// same remote. Test that a client is able to pull this new dataset, and it will merge into their
// logbook, instead of creating two entries for the same user.
func TestMergeWithDivergentLogbookAuthorID(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()
	_ = tr

	ctx := context.Background()

	ref := dsref.MustParse("test_user/first_ds")
	firstKeyData := testkeys.GetKeyData(0)
	firstProfile := mustProfileFromPrivKey("test_user", firstKeyData.PrivKey)
	firstBook := makeLogbookOneCommit(ctx, t, firstProfile, ref, "first commit", "QmHashOfVersion1")

	ref = dsref.MustParse("test_user/second_ds")
	// NOTE: Purposefully use the same crypto key pairs. This will lead to the same
	// profileID, but different logbook userCreateIDs.
	secondKeyData := testkeys.GetKeyData(0)
	secondProfile := mustProfileFromPrivKey("test_user", secondKeyData.PrivKey)
	secondBook := makeLogbookOneCommit(ctx, t, secondProfile, ref, "second commit", "QmHashOfVersion2")

	// Get the log for the newly pushed dataset by initID.
	secondInitID, err := secondBook.RefToInitID(dsref.MustParse("test_user/second_ds"))
	if err != nil {
		t.Fatal(err)
	}
	secondLog, err := secondBook.UserDatasetBranchesLog(ctx, secondInitID)
	if err != nil {
		t.Error(err)
	}
	if len(secondLog.Logs) != 1 {
		t.Errorf("expected UserDatasetRef to only return one dataset log. got: %d", len(secondLog.Logs))
	}

	if err := secondLog.Sign(secondProfile.PrivKey); err != nil {
		t.Error(err)
	}

	if err := firstBook.MergeLog(ctx, secondBook.Owner().PubKey, secondLog); err != nil {
		t.Fatal(err)
	}

	revs, err := firstBook.PlainLogs(ctx)
	if err != nil {
		t.Fatal(err)
	}
	data, err := json.Marshal(revs)
	if err != nil {
		t.Fatal(err)
	}
	output := string(data)

	// Regex that replaces timestamps with just static text
	fixTs := regexp.MustCompile(`"(timestamp|commitTime)":\s?"[0-9TZ.:+-]*?"`)
	actual := string(fixTs.ReplaceAll([]byte(output), []byte(`"timestamp":"timeStampHere"`)))
	expect := `[{"ops":[{"type":"init","model":"user","name":"test_user","authorID":"QmeL2mdVka1eahKENjehK6tBxkkpk5dNQ1qMcgWi7Hrb4B","timestamp":"timeStampHere"}],"logs":[{"ops":[{"type":"init","model":"dataset","name":"first_ds","authorID":"ftl4xgy5pvhfehd4h5wo5wggbac3m5dfywvp2rcohb5ayzgg6gja","timestamp":"timeStampHere"}],"logs":[{"ops":[{"type":"init","model":"branch","name":"main","authorID":"ftl4xgy5pvhfehd4h5wo5wggbac3m5dfywvp2rcohb5ayzgg6gja","timestamp":"timeStampHere"},{"type":"init","model":"commit","ref":"QmHashOfVersion1","timestamp":"timeStampHere","note":"first commit"}]}]},{"ops":[{"type":"init","model":"dataset","name":"second_ds","authorID":"i2smhmm5qrkf242wycim34ffvw4tjoxopk5bwbhleecbn4ojh4aq","timestamp":"timeStampHere"}],"logs":[{"ops":[{"type":"init","model":"branch","name":"main","authorID":"i2smhmm5qrkf242wycim34ffvw4tjoxopk5bwbhleecbn4ojh4aq","timestamp":"timeStampHere"},{"type":"init","model":"commit","ref":"QmHashOfVersion2","timestamp":"timeStampHere","note":"second commit"}]}]}]}]`
	if diff := cmp.Diff(expect, actual); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}
}

// TestRenameAuthor checks WriteAuthorRename: refs under the old username stop
// resolving and refs under the new username resolve.
func TestRenameAuthor(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	tr.WriteWorldBankExample(t)

	// fetching dataset for original author should work
	if _, err := tr.Book.BranchRef(tr.Ctx, tr.WorldBankRef()); err != nil {
		t.Fatalf("fetching %s should work. got: %s", tr.WorldBankRef(), err)
	}

	author := tr.Owner
	rename := "changed_username"
	if err := tr.Book.WriteAuthorRename(tr.Ctx, author, rename); err != nil {
		t.Fatalf("error renaming author: %s", err)
	}

	if rename != tr.Book.Owner().Peername {
		t.Errorf("authorname mismatch. expected: %s, got: %s", rename, tr.Book.Owner().Peername)
	}

	// fetching dataset for original author should NOT work
	if _, err := tr.Book.BranchRef(tr.Ctx, tr.WorldBankRef()); err == nil {
		t.Fatalf("fetching %s must fail. got: %s", tr.WorldBankRef(), err)
	}

	r := dsref.Ref{Username: rename, Name: "world_bank_population"}
	if _, err := tr.Book.BranchRef(tr.Ctx, r); err != nil {
		t.Fatalf("fetching new ref shouldn't fail. got: %s", err)
	}

}

// TestRenameDataset checks that after a dataset rename the old ref errors and
// the new ref returns the expected log entries.
func TestRenameDataset(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	tr.WriteRenameExample(t)

	if _, err := tr.Book.LogEntries(tr.Ctx, tr.RenameInitialRef(), 0, 30); err == nil {
		t.Error("expected fetching renamed dataset to error")
	}

	entries, err := tr.Book.LogEntries(tr.Ctx, tr.RenameRef(), 0, 30)
	// entries, err := tr.Book.Logs(tr.RenameInitialRef(), 0, 30)
	if err != nil {
		t.Fatal(err)
	}

	got := make([]string, len(entries))
	for i, entry := range entries {
		// convert timestamps to UTC for consistent output
		entry.Timestamp = entry.Timestamp.UTC()
		got[i] = entry.String()
		t.Log(got[i])
	}

	expect := []string{
		"12:02AM\ttest_author\tinit branch\tmain",
		"12:00AM\ttest_author\tsave commit\tinitial commit",
		"12:00AM\ttest_author\tsave commit\tadded meta info",
	}

	if diff := cmp.Diff(expect, got); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}
}

// TestItems checks Items pagination: a full page of version infos, then an
// offset/limit window of size one.
func TestItems(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	initID := tr.WriteWorldBankExample(t)
	tr.WriteMoreWorldBankCommits(t, initID)
	book := tr.Book

	items, err := book.Items(tr.Ctx, tr.WorldBankRef(), 0, 10, "")
	if err != nil {
		t.Error(err)
	}

	expect := []dsref.VersionInfo{
		{
			Username:    "test_author",
			Name:        "world_bank_population",
			Path:        "QmHashOfVersion5",
			CommitTime:  mustTime("2000-01-04T19:00:00-05:00"),
			CommitTitle: "v5",
		},
		{
			Username:    "test_author",
			Name:        "world_bank_population",
			Path:        "QmHashOfVersion4",
			CommitTime:  mustTime("2000-01-03T19:00:00-05:00"),
			CommitTitle: "v4",
		},
		{
			Username:    "test_author",
			Name:        "world_bank_population",
			Path:        "QmHashOfVersion3",
			CommitTime:  mustTime("2000-01-02T19:00:00-05:00"),
			CommitTitle: "added meta info",
		},
	}

	if diff := cmp.Diff(expect, items); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}

	items, err = book.Items(tr.Ctx, tr.WorldBankRef(), 1, 1, "")
	if err != nil {
		t.Error(err)
	}

	expect = []dsref.VersionInfo{
		{
			Username:    "test_author",
			Name:        "world_bank_population",
			Path:        "QmHashOfVersion4",
			CommitTime:  mustTime("2000-01-03T19:00:00-05:00"),
			CommitTitle: "v4",
		},
	}
	if diff := cmp.Diff(expect, items); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}
}

// TestFilteredItems checks the Items term filter: "history" returns only
// versions with commits, "run" returns only versions with runs.
func TestFilteredItems(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	_ = tr.WriteBabyNamesExample(t)
	book := tr.Book

	items, err := book.Items(tr.Ctx, tr.BabyNamesRef(), 0, 10, "history")
	if err != nil {
		t.Error(err)
	}

	run2Start := mustTime("2000-01-01T04:00:00-00:00")
	expect := []dsref.VersionInfo{
		{
			Username:    "test_author",
			Name:        "baby_names",
			Path:        "QmVersion2",
			CommitTime:  mustTime("2000-01-01T04:01:00-00:00"),
			CommitTitle: "second commit",
			RunStart:    &run2Start,
			RunDuration: time.Minute.Nanoseconds(),
			RunID:       "run2",
		},
		{
			Username:    "test_author",
			Name:        "baby_names",
			Path:        "QmVersion0",
			CommitTime:  mustTime("2000-01-01T01:01:00-00:00"),
			CommitTitle: "init dataset",
		},
	}

	if diff := cmp.Diff(expect, items); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}

	items, err = book.Items(tr.Ctx, tr.BabyNamesRef(), 0, -1, "run")
	if err != nil {
		t.Error(err)
	}

	run1Start := mustTime("2000-01-01T03:00:00-00:00")
	expect = []dsref.VersionInfo{
		{
			Username:    "test_author",
			Name:        "baby_names",
			Path:        "QmVersion2",
			CommitTime:  mustTime("2000-01-01T04:01:00-00:00"),
			CommitTitle: "second commit",
			RunStart:    &run2Start,
			RunDuration: time.Minute.Nanoseconds(),
			RunID:       "run2",
		},
		{
			Username:    "test_author",
			Name:        "baby_names",
			RunStart:    &run1Start,
			RunDuration: time.Minute.Nanoseconds(),
			RunID:       "run1",
		},
	}
	if diff := cmp.Diff(expect, items); diff != "" {
		t.Errorf("result mismatch (-want +got):\n%s", diff)
	}
}

// TestConstructDatasetLog checks rebuilding a dataset's log from a slice of
// version snapshots: a second reconstruction for the same ref must error, and
// Items must reflect the constructed history.
// NOTE(review): the source listing is truncated here; the remainder of this
// function is not visible and is reproduced exactly as far as the listing goes.
func TestConstructDatasetLog(t *testing.T) {
	tr, cleanup := newTestRunner(t)
	defer cleanup()

	username := tr.Owner.Peername

	book := tr.Book
	name := "to_reconstruct"
	ref := dsref.Ref{Username: username, Name: name}
	history := []*dataset.Dataset{
		{
			Peername: username,
			Name:     name,
			Commit: &dataset.Commit{
				Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
				Title:     "initial commit",
			},
			Path: "HashOfVersion1",
		},
		{
			Peername: username,
			Name:     name,
			Commit: &dataset.Commit{
				Timestamp: time.Date(2000, time.January, 2, 0, 0, 0, 0, time.UTC),
				Title:     "commit 2",
			},
			Path:         "HashOfVersion2",
			PreviousPath: "HashOfVersion1",
		},
		{
			Peername: username,
			Name:     name,
			Commit: &dataset.Commit{
				Timestamp: time.Date(2000, time.January, 3, 0, 0, 0, 0, time.UTC),
				Title:     "commit 2",
			},
			Path:         "HashOfVersion3",
			PreviousPath: "HashOfVersion2",
		},
	}

	if err := book.ConstructDatasetLog(tr.Ctx, tr.Owner, ref, history); err != nil {
		t.Errorf("error constructing history: %s", err)
	}

	if err := book.ConstructDatasetLog(tr.Ctx, tr.Owner, ref, history); err == nil {
		t.Error("expected second call to reconstruct to error")
	}

	// now for the fun bit. When we ask for the state of the log, it will
	// play our opsets forward and get us the current state of tne log
	// we can also get the state of a log from the book:
	items, err := book.Items(tr.Ctx, ref, 0, 100, "")
	if err != nil {
		t.Errorf("getting items: %s", err)
	}

	if len(items) != 3 {
		t.Errorf("expected 3 dslog items to return from history.
got: %d", len(items)) 1057 } 1058 } 1059 1060 func mustTime(str string) time.Time { 1061 t, err := time.Parse(time.RFC3339, str) 1062 if err != nil { 1063 panic(err) 1064 } 1065 return t 1066 } 1067 1068 func mustProfileFromPrivKey(username string, pk crypto.PrivKey) *profile.Profile { 1069 p, err := profile.NewSparsePKProfile(username, pk) 1070 if err != nil { 1071 panic(err) 1072 } 1073 return p 1074 } 1075 1076 type testRunner struct { 1077 Ctx context.Context 1078 bus event.Bus 1079 Owner *profile.Profile 1080 Book *logbook.Book 1081 Fs qfs.Filesystem 1082 Tick int 1083 1084 renameInitID string 1085 worldBankInitID string 1086 babyNamesInitID string 1087 } 1088 1089 func newTestRunner(t *testing.T) (tr *testRunner, cleanup func()) { 1090 ctx := context.Background() 1091 fs := qfs.NewMemFS() 1092 prevTs := logbook.NewTimestamp 1093 tr = &testRunner{ 1094 Ctx: ctx, 1095 bus: event.NewBus(ctx), 1096 Owner: testProfile(t), 1097 } 1098 logbook.NewTimestamp = tr.newTimestamp 1099 1100 var err error 1101 tr.Book, err = logbook.NewJournal(*tr.Owner, tr.bus, fs, "/mem/logbook.qfb") 1102 if err != nil { 1103 t.Fatalf("creating book: %s", err.Error()) 1104 } 1105 1106 cleanup = func() { 1107 logbook.NewTimestamp = prevTs 1108 } 1109 1110 return tr, cleanup 1111 } 1112 1113 func (tr *testRunner) newTimestamp() int64 { 1114 defer func() { tr.Tick++ }() 1115 t := time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) 1116 return t.Add(time.Minute * time.Duration(tr.Tick)).UnixNano() 1117 } 1118 1119 func (tr *testRunner) WorldBankRef() dsref.Ref { 1120 return dsref.Ref{Username: tr.Owner.Peername, Name: "world_bank_population", InitID: tr.worldBankInitID} 1121 } 1122 1123 func (tr *testRunner) WorldBankID() string { 1124 return "crwd4wku64be6uxu3wbfqj7z65vtps4jt5ayx5dpjq4e2k72ks7q" 1125 } 1126 1127 func (tr *testRunner) WriteWorldBankExample(t *testing.T) string { 1128 book := tr.Book 1129 name := "world_bank_population" 1130 1131 initID, err := book.WriteDatasetInit(tr.Ctx, 
tr.Owner, name) 1132 if err != nil { 1133 panic(err) 1134 } 1135 tr.worldBankInitID = initID 1136 1137 // pretend we've just created a dataset, these are the only fields the log 1138 // will care about 1139 ds := &dataset.Dataset{ 1140 ID: initID, 1141 Peername: tr.Owner.Peername, 1142 Name: name, 1143 Commit: &dataset.Commit{ 1144 Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), 1145 Title: "initial commit", 1146 }, 1147 Path: "QmHashOfVersion1", 1148 PreviousPath: "", 1149 } 1150 1151 if err := book.WriteVersionSave(tr.Ctx, tr.Owner, ds, nil); err != nil { 1152 panic(err) 1153 } 1154 1155 // sometime later, we create another version 1156 ds.Commit = &dataset.Commit{ 1157 Timestamp: time.Date(2000, time.January, 2, 0, 0, 0, 0, time.UTC), 1158 Title: "added body data", 1159 } 1160 ds.Path = "QmHashOfVersion2" 1161 ds.PreviousPath = "QmHashOfVersion1" 1162 1163 if err := book.WriteVersionSave(tr.Ctx, tr.Owner, ds, nil); err != nil { 1164 t.Fatal(err) 1165 } 1166 1167 if _, _, err := book.WriteRemotePush(tr.Ctx, tr.Owner, initID, 2, "registry.qri.cloud"); err != nil { 1168 t.Fatal(err) 1169 } 1170 1171 if _, _, err := book.WriteRemoteDelete(tr.Ctx, tr.Owner, initID, 2, "registry.qri.cloud"); err != nil { 1172 t.Fatal(err) 1173 } 1174 1175 book.WriteVersionDelete(tr.Ctx, tr.Owner, initID, 1) 1176 1177 ds.Commit.Timestamp = time.Date(2000, time.January, 3, 0, 0, 0, 0, time.UTC) 1178 ds.Commit.Title = "added meta info" 1179 ds.Path = "QmHashOfVersion3" 1180 ds.PreviousPath = "QmHashOfVersion1" 1181 1182 if err := book.WriteVersionAmend(tr.Ctx, tr.Owner, ds); err != nil { 1183 t.Fatal(err) 1184 } 1185 1186 return initID 1187 } 1188 1189 func (tr *testRunner) WriteMoreWorldBankCommits(t *testing.T, initID string) { 1190 book := tr.Book 1191 name := "world_bank_population" 1192 ds := &dataset.Dataset{ 1193 ID: initID, 1194 Peername: tr.Owner.Peername, 1195 Name: name, 1196 Commit: &dataset.Commit{ 1197 Timestamp: time.Date(2000, time.January, 4, 0, 0, 0, 
0, time.UTC), 1198 Title: "v4", 1199 }, 1200 Path: "QmHashOfVersion4", 1201 PreviousPath: "QmHashOfVersion3", 1202 } 1203 1204 if err := book.WriteVersionSave(tr.Ctx, tr.Owner, ds, nil); err != nil { 1205 panic(err) 1206 } 1207 1208 ds = &dataset.Dataset{ 1209 ID: initID, 1210 Peername: tr.Owner.Peername, 1211 Name: name, 1212 Commit: &dataset.Commit{ 1213 Timestamp: time.Date(2000, time.January, 5, 0, 0, 0, 0, time.UTC), 1214 Title: "v5", 1215 }, 1216 Path: "QmHashOfVersion5", 1217 PreviousPath: "QmHashOfVersion4", 1218 } 1219 1220 if err := book.WriteVersionSave(tr.Ctx, tr.Owner, ds, nil); err != nil { 1221 panic(err) 1222 } 1223 } 1224 1225 // writeBabyNamesLog yields 3 total Items, 2 "history" & 2 "run" (one is both a "history" and a "run") 1226 func (tr *testRunner) WriteBabyNamesExample(t *testing.T) string { 1227 name := "baby_names" 1228 book := tr.Book 1229 author := book.Owner() 1230 ctx := tr.Ctx 1231 1232 initID, err := book.WriteDatasetInit(ctx, author, name) 1233 if err != nil { 1234 panic(err) 1235 } 1236 tr.babyNamesInitID = initID 1237 1238 timestamp := time.Date(2000, time.January, 1, 1, 1, 0, 0, time.UTC) 1239 1240 ds := &dataset.Dataset{ 1241 ID: initID, 1242 Peername: author.Peername, 1243 Name: name, 1244 Commit: &dataset.Commit{ 1245 Timestamp: timestamp, 1246 Title: "init dataset", 1247 }, 1248 Path: "QmVersion0", 1249 PreviousPath: "", 1250 } 1251 1252 if err = book.WriteVersionSave(ctx, author, ds, nil); err != nil { 1253 panic(err) 1254 } 1255 1256 ds.Commit.Title = "first commit" 1257 ds.Commit.Timestamp = time.Date(2000, time.January, 1, 2, 1, 0, 0, time.UTC) 1258 ds.PreviousPath = ds.Path 1259 ds.Path = "QmVersion1" 1260 1261 if err = book.WriteVersionSave(ctx, author, ds, nil); err != nil { 1262 panic(err) 1263 } 1264 1265 if err = book.WriteVersionDelete(ctx, author, initID, 1); err != nil { 1266 panic(err) 1267 } 1268 1269 runStart := time.Date(2000, time.January, 1, 3, 0, 0, 0, time.UTC) 1270 runEnd := time.Date(2000, time.January, 
1, 3, 1, 0, 0, time.UTC) 1271 rs := &run.State{ 1272 ID: "run1", 1273 Number: 1, 1274 StartTime: &runStart, 1275 StopTime: &runEnd, 1276 Duration: time.Minute.Nanoseconds(), 1277 } 1278 1279 if err = book.WriteTransformRun(ctx, author, initID, rs); err != nil { 1280 panic(err) 1281 } 1282 1283 ds.Commit.Title = "second commit" 1284 ds.Commit.Timestamp = time.Date(2000, time.January, 1, 4, 1, 0, 0, time.UTC) 1285 ds.Commit.RunID = "run2" 1286 ds.PreviousPath = ds.Path 1287 ds.Path = "QmVersion2" 1288 1289 rs.ID = "run2" 1290 rs.Number = 2 1291 rs.StopTime = &ds.Commit.Timestamp 1292 rs.Duration = time.Minute.Nanoseconds() 1293 runStart = time.Date(2000, time.January, 1, 4, 0, 0, 0, time.UTC) 1294 rs.StartTime = &runStart 1295 1296 if err = book.WriteVersionSave(ctx, author, ds, rs); err != nil { 1297 panic(err) 1298 } 1299 return initID 1300 } 1301 1302 func (tr *testRunner) BabyNamesRef() dsref.Ref { 1303 return dsref.Ref{Username: tr.Owner.Peername, Name: "baby_names", InitID: tr.babyNamesInitID} 1304 } 1305 1306 func (tr *testRunner) RenameInitialRef() dsref.Ref { 1307 return dsref.Ref{Username: tr.Book.Owner().Peername, Name: "dataset", InitID: tr.renameInitID} 1308 } 1309 1310 func (tr *testRunner) RenameRef() dsref.Ref { 1311 return dsref.Ref{Username: tr.Book.Owner().Peername, Name: "renamed_dataset", InitID: tr.renameInitID} 1312 } 1313 1314 func (tr *testRunner) WriteRenameExample(t *testing.T) { 1315 book := tr.Book 1316 name := "dataset" 1317 rename := "renamed_dataset" 1318 1319 initID, err := book.WriteDatasetInit(tr.Ctx, tr.Owner, name) 1320 if err != nil { 1321 panic(err) 1322 } 1323 tr.renameInitID = initID 1324 1325 // pretend we've just created a dataset, these are the only fields the log 1326 // will care about 1327 ds := &dataset.Dataset{ 1328 ID: initID, 1329 Peername: tr.Owner.Peername, 1330 Name: name, 1331 Commit: &dataset.Commit{ 1332 Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), 1333 Title: "initial commit", 1334 }, 
1335 Path: "QmHashOfVersion1", 1336 PreviousPath: "", 1337 } 1338 1339 if err := book.WriteVersionSave(tr.Ctx, tr.Owner, ds, nil); err != nil { 1340 panic(err) 1341 } 1342 1343 ds.Commit.Timestamp = time.Date(2000, time.January, 3, 0, 0, 0, 0, time.UTC) 1344 ds.Commit.Title = "added meta info" 1345 ds.Path = "QmHashOfVersion2" 1346 ds.PreviousPath = "QmHashOfVersion1" 1347 1348 if err := book.WriteVersionSave(tr.Ctx, tr.Owner, ds, nil); err != nil { 1349 t.Fatal(err) 1350 } 1351 1352 if err := book.WriteDatasetRename(tr.Ctx, tr.Owner, initID, rename); err != nil { 1353 t.Fatal(err) 1354 } 1355 } 1356 1357 func testPrivKey(t *testing.T) crypto.PrivKey { 1358 return testkeys.GetKeyData(10).PrivKey 1359 } 1360 1361 func testProfile(t *testing.T) *profile.Profile { 1362 return mustProfileFromPrivKey("test_author", testPrivKey(t)) 1363 } 1364 1365 func testPrivKey2(t *testing.T) crypto.PrivKey { 1366 return testkeys.GetKeyData(9).PrivKey 1367 } 1368 1369 // ForeignLogbook creates a logbook to use as an external source of oplog data 1370 func (tr *testRunner) foreignLogbook(t *testing.T, username string) *logbook.Book { 1371 t.Helper() 1372 1373 ms := qfs.NewMemFS() 1374 pk := testPrivKey2(t) 1375 pro := mustProfileFromPrivKey(username, pk) 1376 journal, err := logbook.NewJournal(*pro, event.NilBus, ms, "/mem/logbook.qfb") 1377 if err != nil { 1378 t.Fatal(err) 1379 } 1380 1381 return journal 1382 } 1383 1384 func (tr *testRunner) WorldBankPlainLog() logbook.PlainLog { 1385 return logbook.PlainLog{ 1386 Ops: []logbook.PlainOp{ 1387 { 1388 Type: "init", 1389 Model: "user", 1390 Name: "test_author", 1391 AuthorID: "QmZePf5LeXow3RW5U1AgEiNbW46YnRGhZ7HPvm1UmPFPwt", 1392 Timestamp: mustTime("1999-12-31T19:00:00-05:00"), 1393 }, 1394 }, 1395 Logs: []logbook.PlainLog{ 1396 { 1397 Ops: []logbook.PlainOp{ 1398 { 1399 Type: "init", 1400 Model: "dataset", 1401 Name: "world_bank_population", 1402 AuthorID: "tz7ffwfj6e6z2xvdqgh2pf6gjkza5nzlncbjrj54s5s5eh46ma3q", 1403 Timestamp: 
mustTime("1999-12-31T19:01:00-05:00"), 1404 }, 1405 }, 1406 Logs: []logbook.PlainLog{ 1407 { 1408 Ops: []logbook.PlainOp{ 1409 { 1410 Type: "init", 1411 Model: "branch", 1412 Name: "main", 1413 AuthorID: "tz7ffwfj6e6z2xvdqgh2pf6gjkza5nzlncbjrj54s5s5eh46ma3q", 1414 Timestamp: mustTime("1999-12-31T19:02:00-05:00"), 1415 }, 1416 { 1417 Type: "init", 1418 Model: "commit", 1419 Ref: "QmHashOfVersion1", 1420 Timestamp: mustTime("1999-12-31T19:00:00-05:00"), 1421 Note: "initial commit", 1422 }, 1423 { 1424 Type: "init", 1425 Model: "commit", 1426 Ref: "QmHashOfVersion2", 1427 Prev: "QmHashOfVersion1", 1428 Timestamp: mustTime("2000-01-01T19:00:00-05:00"), 1429 Note: "added body data", 1430 }, 1431 { 1432 Type: "init", 1433 Model: "push", 1434 Relations: []string{ 1435 "registry.qri.cloud", 1436 }, 1437 Timestamp: mustTime("1999-12-31T19:03:00-05:00"), 1438 Size: 2, 1439 }, 1440 { 1441 Type: "remove", 1442 Model: "push", 1443 Relations: []string{"registry.qri.cloud"}, 1444 Timestamp: mustTime("1999-12-31T19:04:00-05:00"), 1445 Size: 2, 1446 }, 1447 { 1448 Type: "remove", 1449 Model: "commit", 1450 Timestamp: mustTime("1969-12-31T19:00:00-05:00"), 1451 Size: 1, 1452 }, 1453 { 1454 Type: "amend", 1455 Model: "commit", 1456 Ref: "QmHashOfVersion3", 1457 Prev: "QmHashOfVersion1", 1458 Timestamp: mustTime("2000-01-02T19:00:00-05:00"), 1459 Note: "added meta info", 1460 }, 1461 }, 1462 }, 1463 }, 1464 }, 1465 }, 1466 } 1467 } 1468 1469 // GenerateExampleOplog makes an example dataset history on a given journal, 1470 // returning the initID and a signed log 1471 func GenerateExampleOplog(ctx context.Context, t *testing.T, journal *logbook.Book, dsname, headPath string) (string, *oplog.Log) { 1472 author := journal.Owner() 1473 initID, err := journal.WriteDatasetInit(ctx, author, dsname) 1474 if err != nil { 1475 t.Fatal(err) 1476 } 1477 1478 err = journal.WriteVersionSave(ctx, author, &dataset.Dataset{ 1479 ID: initID, 1480 Peername: author.Peername, 1481 Name: dsname, 1482 
Commit: &dataset.Commit{ 1483 Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), 1484 Title: "initial commit", 1485 }, 1486 Path: headPath, 1487 PreviousPath: "", 1488 }, nil) 1489 if err != nil { 1490 t.Fatal(err) 1491 } 1492 1493 // TODO (b5) - we need UserDatasetRef here b/c it returns the full hierarchy 1494 // of oplogs. This method should take an InitID 1495 lg, err := journal.UserDatasetBranchesLog(ctx, initID) 1496 if err != nil { 1497 t.Fatal(err) 1498 } 1499 1500 if err := lg.Sign(author.PrivKey); err != nil { 1501 t.Fatal(err) 1502 return "", nil 1503 } 1504 1505 return initID, lg 1506 } 1507 1508 func makeLogbookOneCommit(ctx context.Context, t *testing.T, pro *profile.Profile, ref dsref.Ref, commitMessage, dsPath string) *logbook.Book { 1509 rootPath, err := ioutil.TempDir("", "create_logbook") 1510 if err != nil { 1511 t.Fatal(err) 1512 } 1513 fs := qfs.NewMemFS() 1514 1515 builder := logbook.NewLogbookTempBuilder(t, pro, fs, rootPath) 1516 id := builder.DatasetInit(ctx, t, ref.Name) 1517 builder.Commit(ctx, t, id, commitMessage, dsPath) 1518 return builder.Logbook() 1519 }