github.com/qri-io/qri@v0.10.1-0.20220104210721-c771715036cb/logbook/logbook.go

// Package logbook records and syncs dataset histories. As users work on
// datasets, they build up a log of operations. Each operation is a record
// of an action taken, like creating a dataset, or unpublishing a version.
// Each of these operations is written to a log attributed to the user that
// performed the action, and stored in the logbook under the namespace of that
// dataset. The current state of a user's log is derived by iterating over
// all of its operations.
package logbook

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"strings"
	"sync"
	"time"

	golog "github.com/ipfs/go-log"
	crypto "github.com/libp2p/go-libp2p-core/crypto"
	"github.com/qri-io/dataset"
	"github.com/qri-io/qfs"
	"github.com/qri-io/qri/automation/run"
	"github.com/qri-io/qri/dsref"
	"github.com/qri-io/qri/event"
	"github.com/qri-io/qri/logbook/oplog"
	"github.com/qri-io/qri/profile"
)

var (
	log = golog.Logger("logbook")
	// ErrNoLogbook indicates a logbook doesn't exist
	ErrNoLogbook = fmt.Errorf("logbook: does not exist")
	// ErrNotFound is a sentinel error for data not found in a logbook
	ErrNotFound = fmt.Errorf("logbook: not found")
	// ErrLogTooShort indicates a log is missing elements. Because logs are
	// append-only, passing a shorter log than the one on file is grounds
	// for rejection
	ErrLogTooShort = fmt.Errorf("logbook: log is too short")
	// ErrAccessDenied indicates insufficient privileges to perform a logbook
	// operation
	ErrAccessDenied = fmt.Errorf("access denied")

	// NewTimestamp generates the current unix nanosecond time.
	// This is mainly here for tests to override
	NewTimestamp = func() int64 { return time.Now().UnixNano() }
)

const (
	// UserModel is the enum for an author model
	UserModel uint32 = iota
	// DatasetModel is the enum for a dataset model
	DatasetModel
	// BranchModel is the enum for a branch model
	BranchModel
	// CommitModel is the enum for a commit model
	CommitModel
	// PushModel is the enum for a push model
	PushModel
	// RunModel is the enum for transform execution
	RunModel
	// ACLModel is the enum for an ACL model
	ACLModel
)
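// As an illustration of the operation records described in the package doc
// (a sketch based on how operations are constructed later in this file),
// initializing a dataset appends an op along the lines of:
//
//	oplog.Op{
//		Type:      oplog.OpTypeInit,
//		Model:     DatasetModel,
//		Name:      "my_dataset",
//		AuthorID:  authorLogID,
//		Timestamp: NewTimestamp(),
//	}
//
// where "my_dataset" and authorLogID are hypothetical placeholder values.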
const (
	// DefaultBranchName is the default name all branch-level logbook data is read
	// from and written to. We currently don't present branches as a user-facing
	// feature in qri, but logbook supports them
	DefaultBranchName = "main"
	// runIDRelPrefix is a string prefix for op.Relations when recording commit ops
	// that have a non-empty Commit.RunID field. A commit operation that has a
	// related runID will have op.Relations = [...,"runID:run-uuid-string",...].
	// This prefix disambiguates runIDs from other types of identifiers
	runIDRelPrefix = "runID:"
)

// ModelString gets a unique string descriptor for an integral model identifier
func ModelString(m uint32) string {
	switch m {
	case UserModel:
		return "user"
	case DatasetModel:
		return "dataset"
	case BranchModel:
		return "branch"
	case CommitModel:
		return "commit"
	case PushModel:
		return "push"
	case ACLModel:
		return "acl"
	case RunModel:
		return "run"
	default:
		return ""
	}
}

// Book wraps an oplog.Logstore with a higher-order API specific to Qri
type Book struct {
	owner      *profile.Profile
	store      oplog.Logstore
	publisher  event.Publisher
	fs         qfs.Filesystem
	fsLocation string
}

// NewBook creates a book with a user-provided logstore
func NewBook(owner profile.Profile, bus event.Publisher, store oplog.Logstore) *Book {
	return &Book{
		owner:     &owner,
		store:     store,
		publisher: bus,
	}
}

// NewJournal initializes a logbook owned by a single author, reading any
// existing data at the given filesystem location.
// logbooks are encrypted at rest with the owner's private key
func NewJournal(owner profile.Profile, bus event.Publisher, fs qfs.Filesystem, fsLocation string) (*Book, error) {
	ctx := context.Background()
	if owner.PrivKey == nil {
		return nil, fmt.Errorf("logbook: private key is required")
	}
	if fs == nil {
		return nil, fmt.Errorf("logbook: filesystem is required")
	}
	if fsLocation == "" {
		return nil, fmt.Errorf("logbook: location is required")
	}
	if bus == nil {
		return nil, fmt.Errorf("logbook: event.Bus is required")
	}

	book := &Book{
		store:      &oplog.Journal{},
		fs:         fs,
		owner:      &owner,
		fsLocation: fsLocation,
		publisher:  bus,
	}

	if err := book.load(ctx); err != nil {
		if err == ErrNotFound {
			err = book.initialize(ctx)
			return book, err
		}
		return nil, err
	}

	return book, nil
}
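// A minimal usage sketch (illustrative only, not part of the original file):
// assuming the caller already has a profile.Profile with a private key, an
// event bus, and a qfs.Filesystem, a journal is opened (or created) and a
// dataset log is initialized like so:
//
//	book, err := NewJournal(*pro, bus, fs, "/repo/logbook.qfb")
//	if err != nil {
//		// handle error
//	}
//	initID, err := book.WriteDatasetInit(ctx, pro, "my_dataset")
//
// the "/repo/logbook.qfb" location and the variable names are hypothetical;
// NewJournal only requires that the location string be non-empty.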
// NewJournalOverwriteWithProfile initializes a new logbook using the given
// profile. Any existing logbook will be overwritten.
func NewJournalOverwriteWithProfile(owner profile.Profile, bus event.Publisher, fs qfs.Filesystem, fsLocation string) (*Book, error) {
	log.Debugw("NewJournalOverwriteWithProfile", "owner", owner)
	ctx := context.Background()
	if owner.PrivKey == nil {
		return nil, fmt.Errorf("logbook: private key is required")
	}
	if fs == nil {
		return nil, fmt.Errorf("logbook: filesystem is required")
	}
	if fsLocation == "" {
		return nil, fmt.Errorf("logbook: location is required")
	}
	if owner.ID.Encode() == "" {
		return nil, fmt.Errorf("logbook: profileID is required")
	}
	if bus == nil {
		return nil, fmt.Errorf("logbook: event.Bus is required")
	}

	book := &Book{
		store:      &oplog.Journal{},
		owner:      &owner,
		fs:         fs,
		fsLocation: fsLocation,
		publisher:  bus,
	}

	err := book.initialize(ctx)
	return book, err
}

// Owner provides the profile that owns the logbook
func (book *Book) Owner() *profile.Profile {
	return book.owner
}

func (book *Book) initialize(ctx context.Context) error {
	log.Debug("initializing book", "owner", book.owner.ID.Encode())
	// initialize owner's log of user actions
	ownerOplog := oplog.InitLog(oplog.Op{
		Type:      oplog.OpTypeInit,
		Model:     UserModel,
		Name:      book.owner.Peername,
		AuthorID:  book.owner.ID.Encode(),
		Timestamp: NewTimestamp(),
	})

	if err := book.store.MergeLog(ctx, ownerOplog); err != nil {
		return err
	}

	return book.save(ctx, &UserLog{l: ownerOplog}, nil)
}

// ReplaceAll replaces the contents of the logbook with the provided log data
func (book *Book) ReplaceAll(ctx context.Context, lg *oplog.Log) error {
	log.Debugw("ReplaceAll", "log", lg)
	err := book.store.ReplaceAll(ctx, lg)
	if err != nil {
		return err
	}
	return book.save(ctx, nil, nil)
}
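// For orientation, after initialize and a single WriteDatasetInit call the
// logbook hierarchy looks roughly like the following (names are hypothetical):
//
//	user "b5"
//	└── dataset "world_bank_population"
//	    └── branch "main"
//
// each level is an oplog.Log whose Ops slice records the operations applied
// to that model.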
// logPutter is an interface for transactional log updates. Updates provided
// to PutLog should always be complete, rooted log hierarchies
type logPutter interface {
	PutLog(ctx context.Context, l *oplog.Log) error
}

// save writes the book to book.fsLocation. If a non-nil authorLog is provided,
// save tries to write a transactional update
func (book *Book) save(ctx context.Context, authorLog *UserLog, blog *BranchLog) (err error) {
	if authorLog != nil {
		if lp, ok := book.store.(logPutter); ok {
			if err := lp.PutLog(ctx, authorLog.l); err != nil {
				return err
			}
		}
	} else {
		if blog != nil {
			if lp, ok := book.store.(logPutter); ok {
				if err := lp.PutLog(ctx, blog.l); err != nil {
					return err
				}
			}
		}
	}

	if al, ok := book.store.(oplog.AuthorLogstore); ok {
		ciphertext, err := al.FlatbufferCipher(book.owner.PrivKey)
		if err != nil {
			return err
		}

		file := qfs.NewMemfileBytes(book.fsLocation, ciphertext)
		book.fsLocation, err = book.fs.Put(ctx, file)
		log.Debugw("saved author logbook", "err", err)
		return err
	}
	return err
}

// load reads the book dataset from book.fsLocation
func (book *Book) load(ctx context.Context) error {
	if al, ok := book.store.(oplog.AuthorLogstore); ok {
		f, err := book.fs.Get(ctx, book.fsLocation)
		if err != nil {
			if strings.Contains(err.Error(), "not found") {
				return ErrNotFound
			}
			return err
		}

		ciphertext, err := ioutil.ReadAll(f)
		if err != nil {
			return err
		}

		return al.UnmarshalFlatbufferCipher(ctx, book.owner.PrivKey, ciphertext)
	}
	return nil
}
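// The at-rest format is a flatbuffer encrypted with the owner's private key.
// A condensed sketch of the round trip implemented by save and load above
// (error handling elided; al is any store satisfying oplog.AuthorLogstore):
//
//	ciphertext, _ := al.FlatbufferCipher(book.owner.PrivKey)
//	// ...ciphertext is written to book.fsLocation via book.fs.Put...
//	_ = al.UnmarshalFlatbufferCipher(ctx, book.owner.PrivKey, ciphertext)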
// WriteAuthorRename adds an operation updating the author's username
func (book *Book) WriteAuthorRename(ctx context.Context, author *profile.Profile, newName string) error {
	log.Debugw("WriteAuthorRename", "author", author, "newName", newName)
	if book == nil {
		return ErrNoLogbook
	}
	if !dsref.IsValidName(newName) {
		return fmt.Errorf("logbook: author name %q invalid", newName)
	}

	authorLog, err := book.userLog(ctx, author.ID.Encode())
	if err != nil {
		return err
	}

	// TODO (b5): check write access!

	authorLog.Append(oplog.Op{
		Type:  oplog.OpTypeAmend,
		Model: UserModel,
		// on the user branch we always use the author's encoded profileID
		AuthorID:  author.ID.Encode(),
		Name:      newName,
		Timestamp: NewTimestamp(),
	})

	if err := book.save(ctx, authorLog, nil); err != nil {
		return err
	}

	if author.ID.Encode() == book.owner.ID.Encode() {
		book.owner.Peername = newName
	}
	return nil
}

// WriteDatasetInit initializes a new dataset name
func (book *Book) WriteDatasetInit(ctx context.Context, author *profile.Profile, dsName string) (string, error) {
	if book == nil {
		return "", ErrNoLogbook
	}
	if dsName == "" {
		return "", fmt.Errorf("logbook: name is required to initialize a dataset")
	}
	if !dsref.IsValidName(dsName) {
		return "", fmt.Errorf("logbook: dataset name %q invalid", dsName)
	}

	ref := dsref.Ref{Username: author.Peername, Name: dsName}
	if dsLog, err := book.DatasetRef(ctx, ref); err == nil {
		// check for "blank" logs, and remove them
		if len(dsLog.Ops) == 1 && len(dsLog.Logs) == 1 && len(dsLog.Logs[0].Ops) == 1 {
			log.Debugw("removing stranded reference", "ref", ref)
			if err := book.RemoveLog(ctx, ref); err != nil {
				return "", fmt.Errorf("logbook: removing stray log: %w", err)
			}
		} else {
			return "", fmt.Errorf("logbook: dataset named %q already exists", dsName)
		}
	}

	profileID := author.ID.Encode()
	authorLog, err := book.userLog(ctx, profileID)
	if err != nil {
		return "", err
	}
	authorLogID := authorLog.l.ID()

	log.Debugw("initializing dataset", "profileID", profileID, "username", author.Peername, "name", dsName, "authorLogID", authorLogID)
	dsLog := oplog.InitLog(oplog.Op{
		Type:      oplog.OpTypeInit,
		Model:     DatasetModel,
		AuthorID:  authorLogID,
		Name:      dsName,
		Timestamp: NewTimestamp(),
	})

	branch := oplog.InitLog(oplog.Op{
		Type:      oplog.OpTypeInit,
		Model:     BranchModel,
		AuthorID:  authorLogID,
		Name:      DefaultBranchName,
		Timestamp: NewTimestamp(),
	})

	dsLog.AddChild(branch)
	authorLog.AddChild(dsLog)
	initID := dsLog.ID()

	err = book.publisher.Publish(ctx, event.ETDatasetNameInit, dsref.VersionInfo{
		InitID:    initID,
		Username:  author.Peername,
		ProfileID: profileID,
		Name:      dsName,
	})
	if err != nil {
		log.Error(err)
	}

	return initID, book.save(ctx, authorLog, nil)
}
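// Illustrative call sequence (hypothetical dataset name): initializing a
// dataset log and immediately resolving its reference back from the init ID:
//
//	initID, err := book.WriteDatasetInit(ctx, author, "city_budgets")
//	if err != nil {
//		// handle error
//	}
//	ref, err := book.Ref(ctx, initID) // ref.Name == "city_budgets"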
// WriteDatasetRename marks renaming a dataset
func (book *Book) WriteDatasetRename(ctx context.Context, author *profile.Profile, initID string, newName string) error {
	if book == nil {
		return ErrNoLogbook
	}
	if !dsref.IsValidName(newName) {
		return fmt.Errorf("logbook: new dataset name %q invalid", newName)
	}

	dsLog, err := book.datasetLog(ctx, initID)
	if err != nil {
		return err
	}

	if err := book.hasWriteAccess(ctx, dsLog.l, author); err != nil {
		return err
	}

	oldName := dsLog.l.Name()
	log.Debugw("WriteDatasetRename", "author.ID", author.ID.Encode(), "author.Peername", author.Peername, "initID", initID, "oldName", oldName, "newName", newName)

	dsLog.Append(oplog.Op{
		Type:      oplog.OpTypeAmend,
		Model:     DatasetModel,
		Name:      newName,
		Timestamp: NewTimestamp(),
	})

	err = book.publisher.Publish(ctx, event.ETDatasetRename, event.DsRename{
		InitID:  initID,
		OldName: oldName,
		NewName: newName,
	})
	if err != nil {
		log.Error(err)
	}

	authorLog, err := book.userLog(ctx, author.ID.Encode())
	if err != nil {
		return err
	}

	authorLog.AddChild(dsLog.l)

	return book.save(ctx, authorLog, nil)
}

// RefToInitID converts a dsref to an initID by iterating the entire logbook looking for a match.
// This function is inefficient, iterating the entire set of operations in a log. Replacing this
// function call with mechanisms in dscache will fix this problem.
// TODO(dustmop): Don't depend on this function permanently, use a higher level resolver and
// convert all callers of this function to use that resolver's initID instead of converting a
// dsref yet again.
func (book *Book) RefToInitID(ref dsref.Ref) (string, error) {
	if book == nil {
		return "", ErrNoLogbook
	}

	// NOTE: Bad to retrieve the background context here, but HeadRef just ignores it anyway.
	ctx := context.Background()

	// HeadRef is inefficient, iterates the top two levels of the logbook.
	// Runs in O(M*N) where M = number of users, N = number of datasets per user.
	dsLog, err := book.store.HeadRef(ctx, ref.Username, ref.Name)
	if err != nil {
		if err == oplog.ErrNotFound {
			return "", ErrNotFound
		}
		return "", err
	}
	return dsLog.ID(), nil
}

// Return a strongly typed UserLog for the given profileID. Top level of the logbook.
func (book Book) userLog(ctx context.Context, profileID string) (*UserLog, error) {
	lg, err := book.store.GetAuthorID(ctx, UserModel, profileID)
	if err != nil {
		log.Debugw("fetch userLog", "profileID", profileID, "err", err)
		return nil, err
	}
	return newUserLog(lg), nil
}
// Return a strongly typed DatasetLog. Uses the DatasetModel.
func (book *Book) datasetLog(ctx context.Context, initID string) (*DatasetLog, error) {
	lg, err := book.store.Get(ctx, initID)
	if err != nil {
		return nil, err
	}
	return newDatasetLog(lg), nil
}

// Return a strongly typed BranchLog
func (book *Book) branchLog(ctx context.Context, initID string) (*BranchLog, error) {
	lg, err := book.store.Get(ctx, initID)
	if err != nil {
		return nil, err
	}
	if len(lg.Logs) != 1 {
		return nil, fmt.Errorf("expected dataset to have 1 branch, has %d", len(lg.Logs))
	}
	return newBranchLog(lg.Logs[0]), nil
}

// ProfileCanWrite is a utility to check whether a given profile
// has write access to a given dataset by initID
func (book *Book) ProfileCanWrite(ctx context.Context, initID string, pro *profile.Profile) error {
	log, err := book.branchLog(ctx, initID)
	if err != nil {
		if err == oplog.ErrNotFound {
			return nil
		}
		return err
	}
	ul, err := book.userLog(ctx, pro.ID.Encode())
	if err != nil {
		return err
	}

	if log.l.Ops[0].AuthorID != ul.l.ID() {
		return fmt.Errorf("%w: you do not have write access", ErrAccessDenied)
	}
	return nil
}

// hasWriteAccess is a simple author-matching check
func (book *Book) hasWriteAccess(ctx context.Context, log *oplog.Log, pro *profile.Profile) error {
	ul, err := book.userLog(ctx, pro.ID.Encode())
	if err != nil {
		return err
	}

	if log.Ops[0].AuthorID != ul.l.ID() {
		return fmt.Errorf("%w: you do not have write access", ErrAccessDenied)
	}
	return nil
}
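// Because ErrAccessDenied is wrapped with %w above, callers can detect the
// access failure with errors.Is. A short hypothetical check:
//
//	if err := book.ProfileCanWrite(ctx, initID, pro); err != nil {
//		if errors.Is(err, ErrAccessDenied) {
//			// pro is not the author of this dataset
//		}
//	}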
// WriteDatasetDeleteAll closes a dataset, marking it as deleted
func (book *Book) WriteDatasetDeleteAll(ctx context.Context, pro *profile.Profile, initID string) error {
	if book == nil {
		return ErrNoLogbook
	}
	log.Debugw("WriteDatasetDeleteAll", "initID", initID)

	dsLog, err := book.datasetLog(ctx, initID)
	if err != nil {
		return err
	}

	if err := book.hasWriteAccess(ctx, dsLog.l, pro); err != nil {
		return err
	}

	dsLog.Append(oplog.Op{
		Type:      oplog.OpTypeRemove,
		Model:     DatasetModel,
		Timestamp: NewTimestamp(),
	})

	err = book.publisher.Publish(ctx, event.ETDatasetDeleteAll, initID)
	if err != nil {
		log.Error(err)
	}

	return book.save(ctx, nil, nil)
}

// WriteVersionSave adds 1 or 2 operations marking the creation of a dataset
// version. If the run.State arg is nil, only one commit operation is written.
//
// If the run.State argument is non-nil, two operations are written to the log:
// one op for the run, followed by a commit op for the dataset save.
// If run.State is non-nil the dataset.Commit.RunID and rs.ID fields must match
func (book *Book) WriteVersionSave(ctx context.Context, author *profile.Profile, ds *dataset.Dataset, rs *run.State) error {
	if book == nil {
		return ErrNoLogbook
	}

	log.Debugw("WriteVersionSave", "authorID", author.ID.Encode(), "initID", ds.ID)
	branchLog, err := book.branchLog(ctx, ds.ID)
	if err != nil {
		return err
	}

	if err := book.hasWriteAccess(ctx, branchLog.l, author); err != nil {
		return err
	}

	if rs != nil {
		if rs.ID != ds.Commit.RunID {
			return fmt.Errorf("dataset.Commit.RunID does not match the provided run.ID")
		}
		book.appendTransformRun(branchLog, rs)
	}

	book.appendVersionSave(branchLog, ds)
	// TODO(dlong): Think about how to handle a failure exactly here, what needs to be rolled back?
	err = book.save(ctx, nil, branchLog)
	if err != nil {
		return err
	}

	info := dsref.ConvertDatasetToVersionInfo(ds)
	commitCount := int64(0)
	for _, op := range branchLog.Ops() {
		if op.Model == CommitModel {
			switch op.Type {
			case oplog.OpTypeInit:
				commitCount++
			case oplog.OpTypeAmend:
				continue
			case oplog.OpTypeRemove:
				commitCount = commitCount - op.Size
			}
		}
	}
	info.CommitCount = int(commitCount)
	if rs != nil {
		info.RunID = rs.ID
		info.RunDuration = rs.Duration
		info.RunStatus = string(rs.Status)
	}

	if err = book.publisher.Publish(ctx, event.ETLogbookWriteCommit, info); err != nil {
		log.Error(err)
	}

	return nil
}

// WriteTransformRun adds an operation to a log marking the execution of a
// dataset transform script
func (book *Book) WriteTransformRun(ctx context.Context, author *profile.Profile, initID string, rs *run.State) error {
	if book == nil {
		return ErrNoLogbook
	}
	if rs == nil {
		return fmt.Errorf("run state is required")
	}

	log.Debugw("WriteTransformRun", "author.ID", author.ID.Encode(), "initID", initID, "runState.ID", rs.ID, "runState.Status", rs.Status)
	branchLog, err := book.branchLog(ctx, initID)
	if err != nil {
		return err
	}

	if err := book.hasWriteAccess(ctx, branchLog.l, author); err != nil {
		return err
	}

	book.appendTransformRun(branchLog, rs)
	vi := dsref.VersionInfo{
		InitID:      initID,
		RunID:       rs.ID,
		RunStatus:   string(rs.Status),
		RunDuration: rs.Duration,
		RunStart:    rs.StartTime,
	}
	if err = book.publisher.Publish(ctx, event.ETLogbookWriteRun, vi); err != nil {
		log.Error(err)
	}
	// TODO(dlong): Think about how to handle a failure exactly here, what needs to be rolled back?
	return book.save(ctx, nil, branchLog)
}

func (book *Book) appendVersionSave(blog *BranchLog, ds *dataset.Dataset) int {
	op := oplog.Op{
		Type:  oplog.OpTypeInit,
		Model: CommitModel,
		Ref:   ds.Path,
		Prev:  ds.PreviousPath,

		Timestamp: ds.Commit.Timestamp.UnixNano(),
		Note:      ds.Commit.Title,
	}

	if ds.Structure != nil {
		op.Size = int64(ds.Structure.Length)
	}
	if ds.Commit.RunID != "" {
		op.Relations = []string{fmt.Sprintf("%s%s", runIDRelPrefix, ds.Commit.RunID)}
	}

	blog.Append(op)

	return blog.Size() - 1
}
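// For a commit tied to a run, the relation recorded by appendVersionSave is
// the runIDRelPrefix constant followed by the run identifier, e.g. (with a
// hypothetical id):
//
//	op.Relations = []string{"runID:ca54f5d0-run-uuid"}
//
// commitOpRunID, defined near the end of this file, strips the prefix back off
// when branch history is collapsed into version infos.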
// appendTransformRun maps fields from run.State to an operation.
func (book *Book) appendTransformRun(blog *BranchLog, rs *run.State) int {
	op := oplog.Op{
		Type:  oplog.OpTypeInit,
		Model: RunModel,
		Ref:   rs.ID,
		Name:  fmt.Sprintf("%d", rs.Number),

		Size: int64(rs.Duration),
		Note: string(rs.Status),
	}

	if rs.StartTime != nil {
		op.Timestamp = rs.StartTime.UnixNano()
	}

	blog.Append(op)

	return blog.Size() - 1
}

// WriteVersionAmend adds an operation to a log when a dataset amends a commit
// TODO(dustmop): Currently unused by codebase, only called in tests.
func (book *Book) WriteVersionAmend(ctx context.Context, author *profile.Profile, ds *dataset.Dataset) error {
	if book == nil {
		return ErrNoLogbook
	}
	log.Debugf("WriteVersionAmend: '%s'", ds.ID)

	branchLog, err := book.branchLog(ctx, ds.ID)
	if err != nil {
		return err
	}
	if err := book.hasWriteAccess(ctx, branchLog.l, author); err != nil {
		return err
	}

	branchLog.Append(oplog.Op{
		Type:  oplog.OpTypeAmend,
		Model: CommitModel,
		Ref:   ds.Path,
		Prev:  ds.PreviousPath,

		Timestamp: ds.Commit.Timestamp.UnixNano(),
		Note:      ds.Commit.Title,
	})

	return book.save(ctx, nil, branchLog)
}

// WriteVersionDelete adds an operation to a log marking a number of sequential
// versions from HEAD as deleted. Because logs are append-only, deletes are
// recorded as "tombstone" operations that mark removal.
func (book *Book) WriteVersionDelete(ctx context.Context, author *profile.Profile, initID string, revisions int) error {
	if book == nil {
		return ErrNoLogbook
	}
	log.Debugf("WriteVersionDelete: %s, revisions: %d", initID, revisions)

	branchLog, err := book.branchLog(ctx, initID)
	if err != nil {
		return err
	}
	if err := book.hasWriteAccess(ctx, branchLog.l, author); err != nil {
		return err
	}

	branchLog.Append(oplog.Op{
		Type:  oplog.OpTypeRemove,
		Model: CommitModel,
		Size:  int64(revisions),
		// TODO (b5) - finish
	})

	// Calculate the commits after collapsing deletions found at the tail of history (most recent).
	items := branchToVersionInfos(branchLog, dsref.Ref{}, false)

	if len(items) > 0 {
		lastItem := items[len(items)-1]
		lastItem.InitID = initID
		lastItem.CommitCount = len(items)

		if err = book.publisher.Publish(ctx, event.ETLogbookWriteCommit, lastItem); err != nil {
			log.Error(err)
		}
	}

	return book.save(ctx, nil, nil)
}
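// A small worked example of the tombstone behaviour (hypothetical history):
// a branch whose commit ops are
//
//	init, init, init, remove(Size: 2)
//
// lists a single remaining version once the history is collapsed, because the
// remove op deletes the two most recent commits without rewriting the log.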
// WriteRemotePush adds an operation to a log marking the publication of a
// number of versions to a remote address. It returns a rollback function that
// removes the operation when called
func (book *Book) WriteRemotePush(ctx context.Context, author *profile.Profile, initID string, revisions int, remoteAddr string) (l *oplog.Log, rollback func(context.Context) error, err error) {
	if book == nil {
		return nil, nil, ErrNoLogbook
	}
	log.Debugf("WriteRemotePush: %s, revisions: %d, remote: %q", initID, revisions, remoteAddr)

	branchLog, err := book.branchLog(ctx, initID)
	if err != nil {
		return nil, nil, err
	}
	if err := book.hasWriteAccess(ctx, branchLog.l, author); err != nil {
		return nil, nil, err
	}

	branchLog.Append(oplog.Op{
		Type:      oplog.OpTypeInit,
		Model:     PushModel,
		Timestamp: NewTimestamp(),
		Size:      int64(revisions),
		Relations: []string{remoteAddr},
	})

	if err = book.save(ctx, nil, nil); err != nil {
		return nil, nil, err
	}

	var (
		rollbackOnce  sync.Once
		rollbackError error
	)
	// after successful save calling rollback drops the written push operation
	rollback = func(ctx context.Context) error {
		rollbackOnce.Do(func() {
			branchLog, err := book.branchLog(ctx, initID)
			if err != nil {
				rollbackError = err
				return
			}

			// TODO (b5) - the fact that this works means accessors are passing data that
			// if modified will be persisted on save, which may be a *major* source of
			// bugs if not handled correctly by packages that read & save logbook data
			// we should consider returning copies, and adding explicit methods for
			// modification.
			branchLog.l.Ops = branchLog.l.Ops[:len(branchLog.l.Ops)-1]
			rollbackError = book.save(ctx, nil, nil)
		})
		return rollbackError
	}

	sparseLog, err := book.UserDatasetBranchesLog(ctx, initID)
	if err != nil {
		rollback(ctx)
		return nil, rollback, err
	}

	return sparseLog, rollback, nil
}
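// A sketch of the intended calling pattern (the remote address and the
// doNetworkPush helper are hypothetical): record the push, attempt the network
// operation, and undo the logbook entry if the push itself fails:
//
//	lg, rollback, err := book.WriteRemotePush(ctx, author, initID, 1, "registry.example.com")
//	if err != nil {
//		// handle error
//	}
//	if pushErr := doNetworkPush(lg); pushErr != nil { // doNetworkPush is hypothetical
//		_ = rollback(ctx)
//	}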
// WriteRemoteDelete adds an operation to a log marking an unpublish request for
// a count of sequential versions from HEAD
func (book *Book) WriteRemoteDelete(ctx context.Context, author *profile.Profile, initID string, revisions int, remoteAddr string) (l *oplog.Log, rollback func(ctx context.Context) error, err error) {
	if book == nil {
		return nil, nil, ErrNoLogbook
	}
	log.Debugf("WriteRemoteDelete: %s, revisions: %d, remote: %q", initID, revisions, remoteAddr)

	branchLog, err := book.branchLog(ctx, initID)
	if err != nil {
		return nil, nil, err
	}
	if err := book.hasWriteAccess(ctx, branchLog.l, author); err != nil {
		return nil, nil, err
	}

	branchLog.Append(oplog.Op{
		Type:      oplog.OpTypeRemove,
		Model:     PushModel,
		Timestamp: NewTimestamp(),
		Size:      int64(revisions),
		Relations: []string{remoteAddr},
	})

	if err = book.save(ctx, nil, nil); err != nil {
		return nil, nil, err
	}

	var (
		rollbackOnce  sync.Once
		rollbackError error
	)
	// after successful save calling rollback drops the written unpublish operation
	rollback = func(ctx context.Context) error {
		rollbackOnce.Do(func() {
			branchLog, err := book.branchLog(ctx, initID)
			if err != nil {
				rollbackError = err
				return
			}
			branchLog.l.Ops = branchLog.l.Ops[:len(branchLog.l.Ops)-1]
			rollbackError = book.save(ctx, nil, nil)
		})
		return rollbackError
	}

	sparseLog, err := book.UserDatasetBranchesLog(ctx, initID)
	if err != nil {
		rollback(ctx)
		return nil, rollback, err
	}

	return sparseLog, rollback, nil
}

// ListAllLogs lists all of the logs in the logbook
func (book Book) ListAllLogs(ctx context.Context) ([]*oplog.Log, error) {
	return book.store.Logs(ctx, 0, -1)
}

// AllReferencedDatasetPaths scans an entire logbook looking for dataset paths
func (book *Book) AllReferencedDatasetPaths(ctx context.Context) (map[string]struct{}, error) {
	paths := map[string]struct{}{}
	logs, err := book.ListAllLogs(ctx)
	if err != nil {
		return nil, err
	}

	for _, l := range logs {
		addReferencedPaths(l, paths)
	}
	return paths, nil
}

func addReferencedPaths(log *oplog.Log, paths map[string]struct{}) {
	ps := []string{}
	for _, op := range log.Ops {
		if op.Model == CommitModel {
			switch op.Type {
			case oplog.OpTypeInit:
				ps = append(ps, op.Ref)
			case oplog.OpTypeRemove:
				ps = ps[:len(ps)-int(op.Size)]
			case oplog.OpTypeAmend:
				ps[len(ps)-1] = op.Ref
			}
		}
	}
	for _, p := range ps {
		paths[p] = struct{}{}
	}

	for _, l := range log.Logs {
		addReferencedPaths(l, paths)
	}
}
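// A worked example of the collapse performed by addReferencedPaths above
// (paths are hypothetical): the commit ops
//
//	init   Ref="QmA"
//	init   Ref="QmB"
//	amend  Ref="QmC"  // replaces QmB
//	remove Size=1     // drops QmC
//
// contribute only "QmA" to the resulting path set.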
// Log gets a log for a given ID
func (book Book) Log(ctx context.Context, id string) (*oplog.Log, error) {
	return book.store.Get(ctx, id)
}

// ResolveRef completes missing data in a dataset reference, populating
// the human alias if given an initID, or an initID if given a human alias.
// It implements the resolve.NameResolver interface
func (book *Book) ResolveRef(ctx context.Context, ref *dsref.Ref) (string, error) {
	if book == nil {
		return "", dsref.ErrRefNotFound
	}

	// if given an initID, populate the rest of the reference
	if ref.InitID != "" {
		got, err := book.Ref(ctx, ref.InitID)
		if err != nil {
			return "", err
		}
		*ref = got
		return "", nil
	}

	initID, err := book.RefToInitID(*ref)
	if err != nil {
		return "", dsref.ErrRefNotFound
	}
	ref.InitID = initID

	var branchLog *BranchLog
	if ref.Path == "" {
		log.Debugw("finding branch log", "initID", initID)
		branchLog, err = book.branchLog(ctx, initID)
		if err != nil {
			return "", err
		}
		log.Debugw("found branch log", "initID", initID, "size", branchLog.Size(), "latestSavePath", book.latestSavePath(branchLog.l))
		ref.Path = book.latestSavePath(branchLog.l)
	}

	if ref.ProfileID == "" {
		if branchLog == nil {
			branchLog, err = book.branchLog(ctx, initID)
			if err != nil {
				return "", err
			}
		}

		authorLog, err := book.store.Get(ctx, branchLog.l.Author())
		if err != nil {
			return "", err
		}
		ref.ProfileID = authorLog.Ops[0].AuthorID
	}

	return "", nil
}
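// A hypothetical resolution, starting from a human alias only:
//
//	ref := dsref.Ref{Username: "b5", Name: "world_bank_population"}
//	if _, err := book.ResolveRef(ctx, &ref); err == nil {
//		// ref.InitID, ref.Path and ref.ProfileID are now populated
//	}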
// Ref looks up a reference by InitID
func (book *Book) Ref(ctx context.Context, initID string) (dsref.Ref, error) {
	ref := dsref.Ref{
		InitID: initID,
	}

	datasetLog, err := book.datasetLog(ctx, initID)
	if err != nil {
		if errors.Is(err, oplog.ErrNotFound) {
			return ref, dsref.ErrRefNotFound
		}
		return ref, err
	}
	ref.Name = datasetLog.l.Name()

	branchLog, err := book.branchLog(ctx, initID)
	if err != nil {
		if errors.Is(err, oplog.ErrNotFound) {
			return ref, dsref.ErrRefNotFound
		}
		return ref, err
	}
	ref.Path = book.latestSavePath(branchLog.l)

	authorLog, err := book.store.Get(ctx, branchLog.l.Author())
	if err != nil {
		return ref, err
	}
	ref.ProfileID = authorLog.Ops[0].AuthorID
	ref.Username = authorLog.Head().Name
	return ref, nil
}

func (book *Book) latestSavePath(branchLog *oplog.Log) string {
	removes := 0

	for i := len(branchLog.Ops) - 1; i >= 0; i-- {
		op := branchLog.Ops[i]
		if op.Model == CommitModel {
			switch op.Type {
			case oplog.OpTypeRemove:
				removes += int(op.Size)
			case oplog.OpTypeInit, oplog.OpTypeAmend:
				if removes > 0 {
					removes--
				}
				if removes == 0 {
					return op.Ref
				}
			}
		}
	}
	return ""
}

// UserDatasetBranchesLog gets a user's log for a given dataset init ID.
// The returned log will be a user log with only one dataset log containing all
// known branches:
//
//	user
//	  dataset
//	    branch
//	    branch
//	    ...
func (book Book) UserDatasetBranchesLog(ctx context.Context, datasetInitID string) (*oplog.Log, error) {
	log.Debugf("UserDatasetBranchesLog datasetInitID=%q", datasetInitID)
	if datasetInitID == "" {
		return nil, fmt.Errorf("%w: cannot use the empty string as an init id", ErrNotFound)
	}

	dsLog, err := oplog.GetWithSparseAncestorsAllDescendants(ctx, book.store, datasetInitID)
	if err != nil {
		log.Debugf("store error=%q datasetInitID=%q", err, datasetInitID)
		return nil, err
	}

	return dsLog.Parent(), nil
}

// DatasetRef gets a dataset log and all branches. Dataset logs describe
// activity affecting an entire dataset. Things like dataset name changes and
// access control changes are kept in the dataset log
//
// currently all logs are hardcoded to only accept one branch name. This
// function will always return a single branch
//
// TODO(dustmop): Do not add new callers to this, transition away (preferring datasetLog instead),
// and delete it.
func (book Book) DatasetRef(ctx context.Context, ref dsref.Ref) (*oplog.Log, error) {
	if ref.Username == "" {
		return nil, fmt.Errorf("logbook: ref.Username is required")
	}
	if ref.Name == "" {
		return nil, fmt.Errorf("logbook: ref.Name is required")
	}

	return book.store.HeadRef(ctx, ref.Username, ref.Name)
}

// BranchRef gets a branch log for a dataset reference. Branch logs describe
// a line of commits
//
// currently all logs are hardcoded to only accept one branch name. This
// function will always return a single branch
//
// TODO(dustmop): Do not add new callers to this, transition away (preferring branchLog instead),
// and delete it.
func (book Book) BranchRef(ctx context.Context, ref dsref.Ref) (*oplog.Log, error) {
	if ref.Username == "" {
		return nil, fmt.Errorf("logbook: ref.Username is required")
	}
	if ref.Name == "" {
		return nil, fmt.Errorf("logbook: ref.Name is required")
	}

	return book.store.HeadRef(ctx, ref.Username, ref.Name, DefaultBranchName)
}

// LogBytes signs a log and writes it to a flatbuffer
func (book Book) LogBytes(log *oplog.Log, signingKey crypto.PrivKey) ([]byte, error) {
	if err := log.Sign(signingKey); err != nil {
		return nil, err
	}
	return log.FlatbufferBytes(), nil
}

// DsrefAliasForLog parses log data into a dataset alias reference, populating
// only the username, name, and profileID of the dataset.
// The passed in oplog must refer unambiguously to a dataset or branch.
// book.Log() returns exact log references
func DsrefAliasForLog(log *oplog.Log) (dsref.Ref, error) {
	ref := dsref.Ref{}
	if log == nil {
		return ref, fmt.Errorf("logbook: log is required")
	}
	if log.Model() != UserModel {
		return ref, fmt.Errorf("logbook: log isn't rooted as an author")
	}
	if len(log.Logs) != 1 {
		return ref, fmt.Errorf("logbook: ambiguous dataset reference")
	}

	ref = dsref.Ref{
		Username:  log.Name(),
		Name:      log.Logs[0].Name(),
		ProfileID: log.FirstOpAuthorID(),
	}

	return ref, nil
}
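// A hedged sketch combining the two helpers above: fetching the sparse log for
// a dataset and converting it back to an alias reference:
//
//	lg, err := book.UserDatasetBranchesLog(ctx, initID)
//	if err != nil {
//		// handle error
//	}
//	ref, err := DsrefAliasForLog(lg)
//	// on success ref.Username, ref.Name and ref.ProfileID are set; ref.Path is not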
// MergeLog adds a log to the logbook, merging with any existing log data
func (book *Book) MergeLog(ctx context.Context, sender crypto.PubKey, lg *oplog.Log) error {
	if book == nil {
		return ErrNoLogbook
	}
	// eventually access control will dictate which logs can be written by whom.
	// For now we only allow users to merge logs they've written
	// book will need access to a store of public keys before we can verify
	// signatures of non-same-senders
	if err := lg.Verify(sender); err != nil {
		return err
	}

	if err := book.store.MergeLog(ctx, lg); err != nil {
		return err
	}

	return book.save(ctx, nil, nil)
}

// RemoveLog removes an entire log from a logbook
func (book *Book) RemoveLog(ctx context.Context, ref dsref.Ref) error {
	if book == nil {
		return ErrNoLogbook
	}
	book.store.RemoveLog(ctx, dsRefToLogPath(ref)...)
	return book.save(ctx, nil, nil)
}

func dsRefToLogPath(ref dsref.Ref) (path []string) {
	for _, str := range []string{
		ref.Username,
		ref.Name,
	} {
		path = append(path, str)
	}
	return path
}

// ConstructDatasetLog creates a sparse log from a connected dataset history
// where no prior log exists.
// The given history MUST be ordered from oldest to newest commits
// TODO (b5) - this presently only works for datasets in an author's user
// namespace
func (book *Book) ConstructDatasetLog(ctx context.Context, author *profile.Profile, ref dsref.Ref, history []*dataset.Dataset) error {
	if book == nil {
		return ErrNoLogbook
	}

	if _, err := book.RefToInitID(ref); err == nil {
		// if the log already exists, it will be as-or-more rich than this log,
		// refuse to overwrite
		return ErrLogTooShort
	}

	initID, err := book.WriteDatasetInit(ctx, author, ref.Name)
	if err != nil {
		return err
	}
	branchLog, err := book.branchLog(ctx, initID)
	if err != nil {
		return err
	}
	for _, ds := range history {
		book.appendVersionSave(branchLog, ds)
	}
	return book.save(ctx, nil, nil)
}

func commitOpRunID(op oplog.Op) string {
	for _, str := range op.Relations {
		if strings.HasPrefix(str, runIDRelPrefix) {
			return strings.TrimPrefix(str, runIDRelPrefix)
		}
	}
	return ""
}

func versionInfoFromOp(ref dsref.Ref, op oplog.Op) dsref.VersionInfo {
	return dsref.VersionInfo{
		Username:    ref.Username,
		ProfileID:   ref.ProfileID,
		Name:        ref.Name,
		Path:        op.Ref,
		CommitTime:  time.Unix(0, op.Timestamp),
		BodySize:    int(op.Size),
		CommitTitle: op.Note,
	}
}

func runItemFromOp(ref dsref.Ref, op oplog.Op) dsref.VersionInfo {
	runStart := time.Unix(0, op.Timestamp)
	return dsref.VersionInfo{
		Username:    ref.Username,
		ProfileID:   ref.ProfileID,
		Name:        ref.Name,
		RunStart:    &runStart,
		RunID:       op.Ref,
		RunStatus:   op.Note,
		RunDuration: int64(op.Size),
		// TODO(B5): When using qrimatic, I'd like to store the run number as a
		// name string here, but we currently don't have a way to plumb a run number
		// down from the qrimatic scheduler
		// RunNumber: strconv.ParseInt(op.Name),
	}
}

func addCommitDetailsToRunItem(li dsref.VersionInfo, op oplog.Op) dsref.VersionInfo {
	li.CommitTime = time.Unix(0, op.Timestamp)
	li.CommitTitle = op.Note
	li.BodySize = int(op.Size)
	li.Path = op.Ref
	return li
}

// Items collapses the history of a dataset branch into linear log items
func (book Book) Items(ctx context.Context, ref dsref.Ref, offset, limit int, term string) ([]dsref.VersionInfo, error) {
	initID, err := book.RefToInitID(dsref.Ref{Username: ref.Username, Name: ref.Name})
	if err != nil {
		return nil, err
	}
	branchLog, err := book.branchLog(ctx, initID)
	if err != nil {
		return nil, err
	}

	return filteredBranchToVersionInfos(branchLog, ref, offset, limit, term, true), nil
}

// ConvertLogsToVersionInfos collapses the history of a dataset branch into linear log items
func ConvertLogsToVersionInfos(l *oplog.Log, ref dsref.Ref) []dsref.VersionInfo {
	return branchToVersionInfos(newBranchLog(l), ref, true)
}

// filteredBranchToVersionInfos filters and paginates a branchLog as a list of
// VersionInfos. If collapseAllDeletes is true, all delete operations will remove
// the refs before them. Otherwise, only refs at the end of history will be removed
// in this manner.
// TODO (ramfox): this is not the "optimal" way of doing filtering on the log, since
// this version requires iterating over the full list after it has already been
// generated. Can refactor for better performance (examining the log Model as we
// iterate) in the future
func filteredBranchToVersionInfos(blog *BranchLog, ref dsref.Ref, offset, limit int, term string, collapseAllDeletes bool) []dsref.VersionInfo {
	refs := branchToVersionInfos(blog, ref, collapseAllDeletes)
	filteredRefs := []dsref.VersionInfo{}

	// TODO (ramfox): when we learn what other potential things a user could want
	// to filter for, let's create a type & solidify the language
	switch term {
	case "history":
		for _, ref := range refs {
			if ref.Path != "" {
				filteredRefs = append(filteredRefs, ref)
			}
			refs = filteredRefs
		}
	case "run":
		for _, ref := range refs {
			if ref.RunID != "" {
				filteredRefs = append(filteredRefs, ref)
			}
			refs = filteredRefs
		}
	}

	if offset > len(refs) {
		offset = len(refs)
	}
	refs = refs[offset:]

	if limit < len(refs) && limit >= 0 {
		refs = refs[:limit]
	}

	return refs
}
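// Example listing calls using the term filter above (the offsets, limits and
// ref value are hypothetical):
//
//	// ten most recent stored versions, skipping run-only entries
//	infos, err := book.Items(ctx, ref, 0, 10, "history")
//	// all recorded runs
//	runs, err := book.Items(ctx, ref, 0, -1, "run")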
// branchToVersionInfos collapses the history of a dataset branch into linear log items.
// If collapseAllDeletes is true, all delete operations will remove the refs before them. Otherwise,
// only refs at the end of history will be removed in this manner.
func branchToVersionInfos(blog *BranchLog, ref dsref.Ref, collapseAllDeletes bool) []dsref.VersionInfo {
	refs := []dsref.VersionInfo{}
	deleteAtEnd := 0
	for _, op := range blog.Ops() {
		switch op.Model {
		case CommitModel:
			switch op.Type {
			case oplog.OpTypeInit:
				// run operations & commit operations often occur next to each other in
				// the log.
				// if the last item in the slice has a runID that matches a runID resource
				// from this commit, combine them into one Log item that describes both
				// the run and the save
				commitRunID := commitOpRunID(op)
				if commitRunID != "" && len(refs) > 0 && commitRunID == refs[len(refs)-1].RunID {
					refs[len(refs)-1] = addCommitDetailsToRunItem(refs[len(refs)-1], op)
				} else {
					refs = append(refs, versionInfoFromOp(ref, op))
				}
			case oplog.OpTypeAmend:
				deleteAtEnd = 0
				refs[len(refs)-1] = versionInfoFromOp(ref, op)
			case oplog.OpTypeRemove:
				if collapseAllDeletes {
					refs = refs[:len(refs)-int(op.Size)]
				} else {
					deleteAtEnd += int(op.Size)
				}
			}
		case RunModel:
			// runs are only ever "init" op type
			refs = append(refs, runItemFromOp(ref, op))
		case PushModel:
			switch op.Type {
			case oplog.OpTypeInit:
				for i := 1; i <= int(op.Size); i++ {
					refs[len(refs)-i].Published = true
				}
			case oplog.OpTypeRemove:
				for i := 1; i <= int(op.Size); i++ {
					refs[len(refs)-i].Published = false
				}
			}
		}
	}

	if deleteAtEnd > 0 {
		if deleteAtEnd < len(refs) {
			refs = refs[:len(refs)-deleteAtEnd]
		} else {
			refs = []dsref.VersionInfo{}
		}
	}

	// reverse the slice, placing newest first
	// https://github.com/golang/go/wiki/SliceTricks#reversing
	for i := len(refs)/2 - 1; i >= 0; i-- {
		opp := len(refs) - 1 - i
		refs[i], refs[opp] = refs[opp], refs[i]
	}
	return refs
}
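// To make the run/commit merge above concrete (a hypothetical fragment of a
// branch log): the op sequence
//
//	RunModel    init  Ref="run-1"
//	CommitModel init  Ref="QmA" Relations=["runID:run-1"]
//
// collapses into a single VersionInfo carrying both the run fields (RunID,
// RunStatus, RunDuration) and the commit fields (Path, CommitTitle, BodySize).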
// LogEntry is a simplified representation of a log operation
type LogEntry struct {
	Timestamp time.Time
	Author    string
	Action    string
	Note      string
}

// String formats a LogEntry as a string
func (l LogEntry) String() string {
	return fmt.Sprintf("%s\t%s\t%s\t%s", l.Timestamp.Format(time.Kitchen), l.Author, l.Action, l.Note)
}
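// A formatted entry therefore looks something like the following, with tab
// separators between fields (values are hypothetical):
//
//	12:31PM	b5	init branch	main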
// LogEntries returns a summarized "line-by-line" representation of a log for a
// given dataset reference
func (book Book) LogEntries(ctx context.Context, ref dsref.Ref, offset, limit int) ([]LogEntry, error) {
	l, err := book.BranchRef(ctx, ref)
	if err != nil {
		return nil, err
	}

	res := []LogEntry{}
	for _, op := range l.Ops {
		if offset > 0 {
			offset--
			continue
		}
		res = append(res, logEntryFromOp(ref.Username, op))
		if len(res) == limit {
			break
		}
	}

	return res, nil
}

var actionStrings = map[uint32][3]string{
	UserModel:    {"create profile", "update profile", "delete profile"},
	DatasetModel: {"init dataset", "rename dataset", "delete dataset"},
	BranchModel:  {"init branch", "rename branch", "delete branch"},
	CommitModel:  {"save commit", "amend commit", "remove commit"},
	PushModel:    {"publish", "", "unpublish"},
	ACLModel:     {"update access", "update access", "remove all access"},
}

func logEntryFromOp(author string, op oplog.Op) LogEntry {
	note := op.Note
	if note == "" && op.Name != "" {
		note = op.Name
	}
	return LogEntry{
		Timestamp: time.Unix(0, op.Timestamp),
		Author:    author,
		Action:    actionStrings[op.Model][int(op.Type)-1],
		Note:      note,
	}
}

// PlainLogs returns plain-old-data representations of the logs, intended for serialization
func (book Book) PlainLogs(ctx context.Context) ([]PlainLog, error) {
	raw, err := book.store.Logs(ctx, 0, -1)
	if err != nil {
		return nil, err
	}

	logs := make([]PlainLog, len(raw))
	for i, l := range raw {
		logs[i] = NewPlainLog(l)
	}
	return logs, nil
}

// SummaryString prints the entire hierarchy of logbook model/ID/opcount/name in
// a single string
func (book Book) SummaryString(ctx context.Context) string {
	logs, err := book.store.Logs(ctx, 0, -1)
	if err != nil {
		return fmt.Sprintf("error getting diagnostics: %q", err)
	}

	builder := &strings.Builder{}
	for _, user := range logs {
		builder.WriteString(fmt.Sprintf("%s %s %d %s\n", ModelString(user.Model()), user.ID(), len(user.Ops), user.Name()))
		for _, dataset := range user.Logs {
			builder.WriteString(fmt.Sprintf("  %s %s %d %s\n", ModelString(dataset.Model()), dataset.ID(), len(dataset.Ops), dataset.Name()))
			for _, branch := range dataset.Logs {
				builder.WriteString(fmt.Sprintf("    %s %s %d %s\n", ModelString(branch.Model()), branch.ID(), len(branch.Ops), branch.Name()))
			}
		}
	}

	return builder.String()
}

// PlainLog is a human-oriented representation of oplog.Log intended for serialization
type PlainLog struct {
	Ops  []PlainOp  `json:"ops,omitempty"`
	Logs []PlainLog `json:"logs,omitempty"`
}

// NewPlainLog converts an oplog to a plain log
func NewPlainLog(lg *oplog.Log) PlainLog {
	if lg == nil {
		return PlainLog{}
	}

	ops := make([]PlainOp, len(lg.Ops))
	for i, o := range lg.Ops {
		ops[i] = newPlainOp(o)
	}

	var ls []PlainLog
	if len(lg.Logs) > 0 {
		ls = make([]PlainLog, len(lg.Logs))
		for i, l := range lg.Logs {
			ls[i] = NewPlainLog(l)
		}
	}

	return PlainLog{
		Ops:  ops,
		Logs: ls,
	}
}

// PlainOp is a human-oriented representation of oplog.Op intended for serialization
type PlainOp struct {
	// type of operation
	Type string `json:"type,omitempty"`
	// data model to operate on
	Model string `json:"model,omitempty"`
	// identifier of data this operation is documenting
	Ref string `json:"ref,omitempty"`
	// previous reference in a causal history
	Prev string `json:"prev,omitempty"`
	// references this operation relates to. usage is operation type-dependent
	Relations []string `json:"relations,omitempty"`
	// human-readable name for the reference
	Name string `json:"name,omitempty"`
	// identifier for author
	AuthorID string `json:"authorID,omitempty"`
	// operation timestamp, for annotation purposes only
	Timestamp time.Time `json:"timestamp,omitempty"`
	// size of the referenced value in bytes
	Size int64 `json:"size,omitempty"`
	// operation annotation for users. eg: commit title
	Note string `json:"note,omitempty"`
}

func newPlainOp(op oplog.Op) PlainOp {
	return PlainOp{
		Type:      opTypeString(op.Type),
		Model:     ModelString(op.Model),
		Ref:       op.Ref,
		Prev:      op.Prev,
		Relations: op.Relations,
		Name:      op.Name,
		AuthorID:  op.AuthorID,
		Timestamp: time.Unix(0, op.Timestamp),
		Size:      op.Size,
		Note:      op.Note,
	}
}

func opTypeString(op oplog.OpType) string {
	switch op {
	case oplog.OpTypeInit:
		return "init"
	case oplog.OpTypeAmend:
		return "amend"
	case oplog.OpTypeRemove:
		return "remove"
	default:
		return ""
	}
}

func refFromDataset(ds *dataset.Dataset) dsref.Ref {
	return dsref.Ref{
		Username:  ds.Peername,
		ProfileID: ds.ProfileID,
		Name:      ds.Name,
		Path:      ds.Path,
	}
}