github.com/decred/dcrlnd@v0.7.6/channeldb/migration_01_to_11/migrations_test.go

package migration_01_to_11

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"math/rand"
	"reflect"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/decred/dcrd/dcrutil/v4"
	lnwire "github.com/decred/dcrlnd/channeldb/migration/lnwire21"
	"github.com/decred/dcrlnd/kvdb"
	"github.com/decred/dcrlnd/lntypes"
	"github.com/go-errors/errors"
)

// TestPaymentStatusesMigration checks that already completed payments will
// have their payment statuses set to Completed after the migration.
func TestPaymentStatusesMigration(t *testing.T) {
	t.Parallel()

	fakePayment := makeFakePayment()
	paymentHash := sha256.Sum256(fakePayment.PaymentPreimage[:])

	// Add fake payment to test database, verifying that it was created,
	// that we have only one payment, and that its status is not
	// "Completed".
	beforeMigrationFunc := func(d *DB) {
		if err := d.addPayment(fakePayment); err != nil {
			t.Fatalf("unable to add payment: %v", err)
		}

		payments, err := d.fetchAllPayments()
		if err != nil {
			t.Fatalf("unable to fetch payments: %v", err)
		}

		if len(payments) != 1 {
			t.Fatalf("wrong qty of payments: expected 1, got %v",
				len(payments))
		}

		paymentStatus, err := d.fetchPaymentStatus(paymentHash)
		if err != nil {
			t.Fatalf("unable to fetch payment status: %v", err)
		}

		// We should receive the default status, since no status has
		// been written for this payment yet.
		if paymentStatus != StatusUnknown {
			t.Fatalf("wrong payment status: expected %v, got %v",
				StatusUnknown.String(), paymentStatus.String())
		}

		// Lastly, we'll add a locally-sourced circuit and
		// non-locally-sourced circuit to the circuit map. The
		// locally-sourced payment should end up with an InFlight
		// status, while the other should remain unchanged, which
		// defaults to Grounded.
		err = kvdb.Update(d, func(tx kvdb.RwTx) error {
			circuits, err := tx.CreateTopLevelBucket(
				[]byte("circuit-adds"),
			)
			if err != nil {
				return err
			}

			groundedKey := make([]byte, 16)
			binary.BigEndian.PutUint64(groundedKey[:8], 1)
			binary.BigEndian.PutUint64(groundedKey[8:], 1)

			// Generated using TestHalfCircuitSerialization with nil
			// ErrorEncrypter, which is the case for locally-sourced
			// payments. No payment status should end up being set
			// for this circuit, since the short channel id of the
			// key is non-zero (e.g., a forwarded circuit). This
			// will default it to Grounded.
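			// (For context: the 16-byte circuit keys built above
			// encode an 8-byte short channel ID followed by what
			// appears to be an 8-byte HTLC index; a zero channel
			// ID, i.e. sourceHop, marks a locally-sourced payment,
			// while a non-zero one marks a forwarded circuit.)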
			groundedCircuit := []byte{
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x01,
				// start payment hash
				0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				// end payment hash
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
				0x42, 0x40, 0x00,
			}

			err = circuits.Put(groundedKey, groundedCircuit)
			if err != nil {
				return err
			}

			inFlightKey := make([]byte, 16)
			binary.BigEndian.PutUint64(inFlightKey[:8], 0)
			binary.BigEndian.PutUint64(inFlightKey[8:], 1)

			// Generated using TestHalfCircuitSerialization with nil
			// ErrorEncrypter, which is not the case for forwarded
			// payments, but should have no impact on the
			// correctness of the test. The payment status for this
			// circuit should be set to InFlight, since the short
			// channel id in the key is 0 (sourceHop).
			inFlightCircuit := []byte{
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x01,
				// start payment hash
				0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				// end payment hash
				0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
				0x42, 0x40, 0x00,
			}

			return circuits.Put(inFlightKey, inFlightCircuit)
		}, func() {})
		if err != nil {
			t.Fatalf("unable to add circuit map entry: %v", err)
		}
	}

	// Verify that the created payment status is "Completed" for our one
	// fake payment.
	afterMigrationFunc := func(d *DB) {
		// Check that our completed payments were migrated.
		paymentStatus, err := d.fetchPaymentStatus(paymentHash)
		if err != nil {
			t.Fatalf("unable to fetch payment status: %v", err)
		}

		if paymentStatus != StatusSucceeded {
			t.Fatalf("wrong payment status: expected %v, got %v",
				StatusSucceeded.String(), paymentStatus.String())
		}

		inFlightHash := [32]byte{
			0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}

		// Check that the locally sourced payment was transitioned to
		// InFlight.
		paymentStatus, err = d.fetchPaymentStatus(inFlightHash)
		if err != nil {
			t.Fatalf("unable to fetch payment status: %v", err)
		}

		if paymentStatus != StatusInFlight {
			t.Fatalf("wrong payment status: expected %v, got %v",
				StatusInFlight.String(), paymentStatus.String())
		}

		groundedHash := [32]byte{
			0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}

		// Check that non-locally sourced payments remain in the
		// default Grounded state.
		paymentStatus, err = d.fetchPaymentStatus(groundedHash)
		if err != nil {
			t.Fatalf("unable to fetch payment status: %v", err)
		}

		if paymentStatus != StatusUnknown {
			t.Fatalf("wrong payment status: expected %v, got %v",
				StatusUnknown.String(), paymentStatus.String())
		}
	}

	applyMigration(t,
		beforeMigrationFunc,
		afterMigrationFunc,
		PaymentStatusesMigration,
		false)
}

// TestMigrateOptionalChannelCloseSummaryFields tests that the migration
// properly converts a ChannelCloseSummary to the v7 format, where optional
// fields have their presence indicated with boolean markers.
func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
	t.Parallel()

	chanState, err := createTestChannelState(nil)
	if err != nil {
		t.Fatalf("unable to create channel state: %v", err)
	}

	var chanPointBuf bytes.Buffer
	err = writeOutpoint(&chanPointBuf, &chanState.FundingOutpoint)
	if err != nil {
		t.Fatalf("unable to write outpoint: %v", err)
	}

	chanID := chanPointBuf.Bytes()

	testCases := []struct {
		closeSummary     *ChannelCloseSummary
		oldSerialization func(c *ChannelCloseSummary) []byte
	}{
		{
			// A close summary where none of the new fields are
			// set.
			closeSummary: &ChannelCloseSummary{
				ChanPoint:      chanState.FundingOutpoint,
				ShortChanID:    chanState.ShortChanID(),
				ChainHash:      chanState.ChainHash,
				ClosingTXID:    testTx.TxHash(),
				CloseHeight:    100,
				RemotePub:      chanState.IdentityPub,
				Capacity:       chanState.Capacity,
				SettledBalance: dcrutil.Amount(50000),
				CloseType:      RemoteForceClose,
				IsPending:      true,

				// The last fields will be unset.
				RemoteCurrentRevocation: nil,
				LocalChanConfig:         ChannelConfig{},
				RemoteNextRevocation:    nil,
			},

			// In the old format the last field written is the
			// IsPending field. It should be converted by adding an
			// extra boolean marker at the end to indicate that the
			// remaining fields are not there.
			oldSerialization: func(cs *ChannelCloseSummary) []byte {
				var buf bytes.Buffer
				err := WriteElements(&buf, cs.ChanPoint,
					cs.ShortChanID, cs.ChainHash,
					cs.ClosingTXID, cs.CloseHeight,
					cs.RemotePub, cs.Capacity,
					cs.SettledBalance, cs.TimeLockedBalance,
					cs.CloseType, cs.IsPending,
				)
				if err != nil {
					t.Fatal(err)
				}

				// For the old format, these are all the fields
				// that are written.
				return buf.Bytes()
			},
		},
		{
			// A close summary where the new fields are present,
			// but the optional RemoteNextRevocation field is not
			// set.
			closeSummary: &ChannelCloseSummary{
				ChanPoint:               chanState.FundingOutpoint,
				ShortChanID:             chanState.ShortChanID(),
				ChainHash:               chanState.ChainHash,
				ClosingTXID:             testTx.TxHash(),
				CloseHeight:             100,
				RemotePub:               chanState.IdentityPub,
				Capacity:                chanState.Capacity,
				SettledBalance:          dcrutil.Amount(50000),
				CloseType:               RemoteForceClose,
				IsPending:               true,
				RemoteCurrentRevocation: chanState.RemoteCurrentRevocation,
				LocalChanConfig:         chanState.LocalChanCfg,

				// RemoteNextRevocation is optional, and here
				// it is not set.
				RemoteNextRevocation: nil,
			},

			// In the old format the last field written is the
			// LocalChanConfig. This indicates that the optional
			// RemoteNextRevocation field is not present. It should
			// be converted by adding boolean markers for all these
			// fields.
			oldSerialization: func(cs *ChannelCloseSummary) []byte {
				var buf bytes.Buffer
				err := WriteElements(&buf, cs.ChanPoint,
					cs.ShortChanID, cs.ChainHash,
					cs.ClosingTXID, cs.CloseHeight,
					cs.RemotePub, cs.Capacity,
					cs.SettledBalance, cs.TimeLockedBalance,
					cs.CloseType, cs.IsPending,
				)
				if err != nil {
					t.Fatal(err)
				}

				err = WriteElements(&buf, cs.RemoteCurrentRevocation)
				if err != nil {
					t.Fatal(err)
				}

				err = writeChanConfig(&buf, &cs.LocalChanConfig)
				if err != nil {
					t.Fatal(err)
				}

				// RemoteNextRevocation is not written.
				return buf.Bytes()
			},
		},
		{
			// A close summary where all fields are present.
			closeSummary: &ChannelCloseSummary{
				ChanPoint:               chanState.FundingOutpoint,
				ShortChanID:             chanState.ShortChanID(),
				ChainHash:               chanState.ChainHash,
				ClosingTXID:             testTx.TxHash(),
				CloseHeight:             100,
				RemotePub:               chanState.IdentityPub,
				Capacity:                chanState.Capacity,
				SettledBalance:          dcrutil.Amount(50000),
				CloseType:               RemoteForceClose,
				IsPending:               true,
				RemoteCurrentRevocation: chanState.RemoteCurrentRevocation,
				LocalChanConfig:         chanState.LocalChanCfg,

				// RemoteNextRevocation is optional, and in
				// this case we set it.
				RemoteNextRevocation: chanState.RemoteNextRevocation,
			},

			// In the old format all the fields are written. It
			// should be converted by adding boolean markers for
			// all these fields.
			oldSerialization: func(cs *ChannelCloseSummary) []byte {
				var buf bytes.Buffer
				err := WriteElements(&buf, cs.ChanPoint,
					cs.ShortChanID, cs.ChainHash,
					cs.ClosingTXID, cs.CloseHeight,
					cs.RemotePub, cs.Capacity,
					cs.SettledBalance, cs.TimeLockedBalance,
					cs.CloseType, cs.IsPending,
				)
				if err != nil {
					t.Fatal(err)
				}

				err = WriteElements(&buf, cs.RemoteCurrentRevocation)
				if err != nil {
					t.Fatal(err)
				}

				err = writeChanConfig(&buf, &cs.LocalChanConfig)
				if err != nil {
					t.Fatal(err)
				}

				err = WriteElements(&buf, cs.RemoteNextRevocation)
				if err != nil {
					t.Fatal(err)
				}

				return buf.Bytes()
			},
		},
	}

	for _, test := range testCases {

		// Before the migration we must add the old format to the DB.
		beforeMigrationFunc := func(d *DB) {

			// Get the old serialization format for this test's
			// close summary, and add it to the closed channel
			// bucket.
			old := test.oldSerialization(test.closeSummary)
			err = kvdb.Update(d, func(tx kvdb.RwTx) error {
				closedChanBucket, err := tx.CreateTopLevelBucket(
					closedChannelBucket,
				)
				if err != nil {
					return err
				}
				return closedChanBucket.Put(chanID, old)
			}, func() {})
			if err != nil {
				t.Fatalf("unable to add old serialization: %v",
					err)
			}
		}

		// After the migration it should be found in the new format.
		afterMigrationFunc := func(d *DB) {
			// We generate the new serialized version, to check
			// against what is found in the DB.
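			// serializeChannelCloseSummary is expected to emit
			// the new (v7) encoding described above, in which the
			// optional trailing fields are preceded by boolean
			// presence markers.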
			var b bytes.Buffer
			err = serializeChannelCloseSummary(&b, test.closeSummary)
			if err != nil {
				t.Fatalf("unable to serialize: %v", err)
			}
			newSerialization := b.Bytes()

			var dbSummary []byte
			err = kvdb.View(d, func(tx kvdb.RTx) error {
				closedChanBucket := tx.ReadBucket(closedChannelBucket)
				if closedChanBucket == nil {
					return errors.New("unable to find bucket")
				}

				// Get the serialized version from the DB and
				// make sure it matches what we expected.
				dbSummary = closedChanBucket.Get(chanID)
				if !bytes.Equal(dbSummary, newSerialization) {
					return fmt.Errorf("unexpected new " +
						"serialization")
				}
				return nil
			}, func() {
				dbSummary = nil
			})
			if err != nil {
				t.Fatalf("unable to view DB: %v", err)
			}

			// Finally we fetch the deserialized summary from the
			// DB and check that it is equal to our original one.
			dbChannels, err := d.FetchClosedChannels(false)
			if err != nil {
				t.Fatalf("unable to fetch closed channels: %v",
					err)
			}

			if len(dbChannels) != 1 {
				t.Fatalf("expected 1 closed channel, found %v",
					len(dbChannels))
			}

			dbChan := dbChannels[0]
			if !reflect.DeepEqual(dbChan, test.closeSummary) {
				t.Fatalf("not equal: %v vs %v",
					spew.Sdump(dbChan),
					spew.Sdump(test.closeSummary))
			}

		}

		applyMigration(t,
			beforeMigrationFunc,
			afterMigrationFunc,
			MigrateOptionalChannelCloseSummaryFields,
			false)
	}
}

// TestMigrateGossipMessageStoreKeys ensures that the migration to the new
// gossip message store key format is successful/unsuccessful under various
// scenarios.
func TestMigrateGossipMessageStoreKeys(t *testing.T) {
	t.Parallel()

	// Construct the message which we'll use to test the migration, along
	// with its old and new key formats.
	shortChanID := lnwire.ShortChannelID{BlockHeight: 10}
	msg := &lnwire.AnnounceSignatures{ShortChannelID: shortChanID}

	var oldMsgKey [33 + 8]byte
	copy(oldMsgKey[:33], pubKey.SerializeCompressed())
	binary.BigEndian.PutUint64(oldMsgKey[33:41], shortChanID.ToUint64())

	var newMsgKey [33 + 8 + 2]byte
	copy(newMsgKey[:41], oldMsgKey[:])
	binary.BigEndian.PutUint16(newMsgKey[41:43], uint16(msg.MsgType()))

	// Before the migration, we'll create the bucket where the messages
	// should live and insert them.
	beforeMigration := func(db *DB) {
		var b bytes.Buffer
		if err := msg.Encode(&b, 0); err != nil {
			t.Fatalf("unable to serialize message: %v", err)
		}

		err := kvdb.Update(db, func(tx kvdb.RwTx) error {
			messageStore, err := tx.CreateTopLevelBucket(
				messageStoreBucket,
			)
			if err != nil {
				return err
			}

			return messageStore.Put(oldMsgKey[:], b.Bytes())
		}, func() {})
		if err != nil {
			t.Fatal(err)
		}
	}

	// After the migration, we'll make sure that:
	// 1. We cannot find the message under its old key.
	// 2. We can find the message under its new key.
	// 3. The message matches the original.
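	// For reference, the new key checked in step 2 is laid out as the
	// 33-byte compressed pubkey, followed by the 8-byte short channel ID,
	// followed by the 2-byte wire message type (see newMsgKey above).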
	afterMigration := func(db *DB) {
		var rawMsg []byte
		err := kvdb.View(db, func(tx kvdb.RTx) error {
			messageStore := tx.ReadBucket(messageStoreBucket)
			if messageStore == nil {
				return errors.New("message store bucket not " +
					"found")
			}
			rawMsg = messageStore.Get(oldMsgKey[:])
			if rawMsg != nil {
				t.Fatal("expected to not find message under " +
					"old key, but did")
			}
			rawMsg = messageStore.Get(newMsgKey[:])
			if rawMsg == nil {
				return fmt.Errorf("expected to find message " +
					"under new key, but didn't")
			}

			return nil
		}, func() {
			rawMsg = nil
		})
		if err != nil {
			t.Fatal(err)
		}

		gotMsg, err := lnwire.ReadMessage(bytes.NewReader(rawMsg), 0)
		if err != nil {
			t.Fatalf("unable to deserialize raw message: %v", err)
		}
		if !reflect.DeepEqual(msg, gotMsg) {
			t.Fatalf("expected message: %v\ngot message: %v",
				spew.Sdump(msg), spew.Sdump(gotMsg))
		}
	}

	applyMigration(
		t, beforeMigration, afterMigration,
		MigrateGossipMessageStoreKeys, false,
	)
}

// TestOutgoingPaymentsMigration checks that OutgoingPayments are migrated to a
// new bucket structure after the migration.
func TestOutgoingPaymentsMigration(t *testing.T) {
	t.Parallel()

	const numPayments = 4
	var oldPayments []*outgoingPayment

	// Add fake payments to test database, verifying that they were
	// created.
	beforeMigrationFunc := func(d *DB) {
		for i := 0; i < numPayments; i++ {
			var p *outgoingPayment
			var err error

			// We fill the database with random payments. For the
			// very last one we'll use a duplicate of the first, to
			// ensure we are able to handle migration from a
			// database that has copies.
			if i < numPayments-1 {
				p, err = makeRandomFakePayment()
				if err != nil {
					t.Fatalf("unable to create payment: %v",
						err)
				}
			} else {
				p = oldPayments[0]
			}

			if err := d.addPayment(p); err != nil {
				t.Fatalf("unable to add payment: %v", err)
			}

			oldPayments = append(oldPayments, p)
		}

		payments, err := d.fetchAllPayments()
		if err != nil {
			t.Fatalf("unable to fetch payments: %v", err)
		}

		if len(payments) != numPayments {
			t.Fatalf("wrong qty of payments: expected %d, got %v",
				numPayments, len(payments))
		}
	}

	// Verify that all payments were migrated.
	afterMigrationFunc := func(d *DB) {
		sentPayments, err := d.fetchPaymentsMigration9()
		if err != nil {
			t.Fatalf("unable to fetch sent payments: %v", err)
		}

		if len(sentPayments) != numPayments {
			t.Fatalf("expected %d payments, got %d", numPayments,
				len(sentPayments))
		}

		graph := d.ChannelGraph()
		sourceNode, err := graph.SourceNode()
		if err != nil {
			t.Fatalf("unable to fetch source node: %v", err)
		}

		for i, p := range sentPayments {
			// The payment status should be Completed.
			if p.Status != StatusSucceeded {
				t.Fatalf("expected Completed, got %v", p.Status)
			}

			// Check that the sequence number is preserved. They
			// start counting at 1.
			if p.sequenceNum != uint64(i+1) {
				t.Fatalf("expected seqnum %d, got %d", i+1,
					p.sequenceNum)
			}

			// Order of payments should be preserved.
			old := oldPayments[i]

			// Check the individual fields.
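			// Each legacy outgoingPayment field should have been
			// carried over into the new Payment's Info and
			// Attempt.Route, which we verify one by one below.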
			if p.Info.Value != old.Terms.Value {
				t.Fatalf("value mismatch")
			}

			if p.Info.CreationDate != old.CreationDate {
				t.Fatalf("date mismatch")
			}

			if !bytes.Equal(p.Info.PaymentRequest, old.PaymentRequest) {
				t.Fatalf("payreq mismatch")
			}

			if *p.PaymentPreimage != old.PaymentPreimage {
				t.Fatalf("preimage mismatch")
			}

			if p.Attempt.Route.TotalFees() != old.Fee {
				t.Fatalf("Fee mismatch")
			}

			if p.Attempt.Route.TotalAmount != old.Fee+old.Terms.Value {
				t.Fatalf("Total amount mismatch")
			}

			if p.Attempt.Route.TotalTimeLock != old.TimeLockLength {
				t.Fatalf("timelock mismatch")
			}

			if p.Attempt.Route.SourcePubKey != sourceNode.PubKeyBytes {
				t.Fatalf("source mismatch: %x vs %x",
					p.Attempt.Route.SourcePubKey[:],
					sourceNode.PubKeyBytes[:])
			}

			for i, hop := range old.Path {
				if hop != p.Attempt.Route.Hops[i].PubKeyBytes {
					t.Fatalf("path mismatch")
				}
			}
		}

		// Finally, check that the payment sequence number is updated
		// to reflect the migrated payments.
		err = kvdb.Update(d, func(tx kvdb.RwTx) error {
			payments := tx.ReadWriteBucket(paymentsRootBucket)
			if payments == nil {
				return fmt.Errorf("payments bucket not found")
			}

			seq := payments.Sequence()
			if seq != numPayments {
				return fmt.Errorf("expected sequence to be "+
					"%d, got %d", numPayments, seq)
			}

			return nil
		}, func() {})
		if err != nil {
			t.Fatal(err)
		}
	}

	applyMigration(t,
		beforeMigrationFunc,
		afterMigrationFunc,
		MigrateOutgoingPayments,
		false)
}

func makeRandPaymentCreationInfo() (*PaymentCreationInfo, error) {
	var payHash lntypes.Hash
	if _, err := rand.Read(payHash[:]); err != nil {
		return nil, err
	}

	return &PaymentCreationInfo{
		PaymentHash:    payHash,
		Value:          lnwire.MilliAtom(rand.Int63()),
		CreationDate:   time.Now(),
		PaymentRequest: []byte("test"),
	}, nil
}

// TestPaymentRouteSerialization tests that we're able to properly migrate
// existing payments on disk that contain the traversed routes to the new
// routing format which supports the TLV payloads. We also test that the
// migration is able to handle duplicate payment attempts.
func TestPaymentRouteSerialization(t *testing.T) {
	t.Parallel()

	legacyHop1 := &Hop{
		PubKeyBytes:      NewVertex(pub),
		ChannelID:        12345,
		OutgoingTimeLock: 111,
		LegacyPayload:    true,
		AmtToForward:     555,
	}
	legacyHop2 := &Hop{
		PubKeyBytes:      NewVertex(pub),
		ChannelID:        12345,
		OutgoingTimeLock: 111,
		LegacyPayload:    true,
		AmtToForward:     555,
	}
	legacyRoute := Route{
		TotalTimeLock: 123,
		TotalAmount:   1234567,
		SourcePubKey:  NewVertex(pub),
		Hops:          []*Hop{legacyHop1, legacyHop2},
	}

	const numPayments = 4
	var oldPayments []*Payment

	sharedPayAttempt := PaymentAttemptInfo{
		PaymentID:  1,
		SessionKey: priv,
		Route:      legacyRoute,
	}

	// We'll first add a series of fake payments, using the existing legacy
	// serialization format.
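	// Note that the final payment re-uses the first payment's hash and is
	// written under the duplicate-payment sub-bucket, which also exercises
	// the migration's handling of duplicate payment attempts.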
	beforeMigrationFunc := func(d *DB) {
		err := kvdb.Update(d, func(tx kvdb.RwTx) error {
			paymentsBucket, err := tx.CreateTopLevelBucket(
				paymentsRootBucket,
			)
			if err != nil {
				t.Fatalf("unable to create new payments "+
					"bucket: %v", err)
			}

			for i := 0; i < numPayments; i++ {
				var seqNum [8]byte
				byteOrder.PutUint64(seqNum[:], uint64(i))

				// All payments will be randomly generated,
				// other than the final payment. We'll force
				// the final payment to re-use an existing
				// payment hash so we can insert it into the
				// duplicate payment hash bucket.
				var payInfo *PaymentCreationInfo
				if i < numPayments-1 {
					payInfo, err = makeRandPaymentCreationInfo()
					if err != nil {
						t.Fatalf("unable to create "+
							"payment: %v", err)
					}
				} else {
					payInfo = oldPayments[0].Info
				}

				// Next, we'll serialize the payment info and
				// the attempt using the legacy encoding.
				var payInfoBytes bytes.Buffer
				err = serializePaymentCreationInfo(
					&payInfoBytes, payInfo,
				)
				if err != nil {
					t.Fatalf("unable to encode pay "+
						"info: %v", err)
				}
				var payAttemptBytes bytes.Buffer
				err = serializePaymentAttemptInfoLegacy(
					&payAttemptBytes, &sharedPayAttempt,
				)
				if err != nil {
					t.Fatalf("unable to encode payment attempt: "+
						"%v", err)
				}

				// Before we write to disk, we'll need to fetch
				// the proper bucket. If this is the duplicate
				// payment, then we'll grab the dup bucket,
				// otherwise, we'll use the top level bucket.
				var payHashBucket kvdb.RwBucket
				if i < numPayments-1 {
					payHashBucket, err = paymentsBucket.CreateBucket(
						payInfo.PaymentHash[:],
					)
					if err != nil {
						t.Fatalf("unable to create payments bucket: %v", err)
					}
				} else {
					payHashBucket = paymentsBucket.NestedReadWriteBucket(
						payInfo.PaymentHash[:],
					)
					dupPayBucket, err := payHashBucket.CreateBucket(
						paymentDuplicateBucket,
					)
					if err != nil {
						t.Fatalf("unable to create "+
							"dup hash bucket: %v", err)
					}

					payHashBucket, err = dupPayBucket.CreateBucket(
						seqNum[:],
					)
					if err != nil {
						t.Fatalf("unable to make dup "+
							"bucket: %v", err)
					}
				}

				err = payHashBucket.Put(paymentSequenceKey, seqNum[:])
				if err != nil {
					t.Fatalf("unable to write seqno: %v", err)
				}

				err = payHashBucket.Put(
					paymentCreationInfoKey, payInfoBytes.Bytes(),
				)
				if err != nil {
					t.Fatalf("unable to write creation "+
						"info: %v", err)
				}

				err = payHashBucket.Put(
					paymentAttemptInfoKey, payAttemptBytes.Bytes(),
				)
				if err != nil {
					t.Fatalf("unable to write attempt "+
						"info: %v", err)
				}

				oldPayments = append(oldPayments, &Payment{
					Info:    payInfo,
					Attempt: &sharedPayAttempt,
				})
			}

			return nil
		}, func() {
			oldPayments = nil
		})
		if err != nil {
			t.Fatalf("unable to create test payments: %v", err)
		}
	}

	afterMigrationFunc := func(d *DB) {
		newPayments, err := d.FetchPayments()
		if err != nil {
			t.Fatalf("unable to fetch new payments: %v", err)
		}

		if len(newPayments) != numPayments {
			t.Fatalf("expected %d payments, got %d", numPayments,
				len(newPayments))
		}

		for i, p := range newPayments {
			// Order of payments should be preserved.
			old := oldPayments[i]

			if p.Attempt.PaymentID != old.Attempt.PaymentID {
				t.Fatalf("wrong pay ID: expected %v, got %v",
					old.Attempt.PaymentID,
					p.Attempt.PaymentID)
			}

			if p.Attempt.Route.TotalFees() != old.Attempt.Route.TotalFees() {
				t.Fatalf("Fee mismatch")
			}

			if p.Attempt.Route.TotalAmount != old.Attempt.Route.TotalAmount {
				t.Fatalf("Total amount mismatch")
			}

			if p.Attempt.Route.TotalTimeLock != old.Attempt.Route.TotalTimeLock {
				t.Fatalf("timelock mismatch")
			}

			if p.Attempt.Route.SourcePubKey != old.Attempt.Route.SourcePubKey {
				t.Fatalf("source mismatch: %x vs %x",
					p.Attempt.Route.SourcePubKey[:],
					old.Attempt.Route.SourcePubKey[:])
			}

			for i, hop := range p.Attempt.Route.Hops {
				if !reflect.DeepEqual(hop, legacyRoute.Hops[i]) {
					t.Fatalf("hop mismatch")
				}
			}
		}
	}

	applyMigration(t,
		beforeMigrationFunc,
		afterMigrationFunc,
		MigrateRouteSerialization,
		false)
}

// TestNotCoveredMigrations only references migrations that are not referenced
// anywhere else in this package. This prevents false positives when linting
// with unused.
func TestNotCoveredMigrations(t *testing.T) {
	_ = MigrateNodeAndEdgeUpdateIndex
	_ = MigrateInvoiceTimeSeries
	_ = MigrateInvoiceTimeSeriesOutgoingPayments
	_ = MigrateEdgePolicies
	_ = MigratePruneEdgeUpdateIndex
}