github.com/decred/dcrlnd@v0.7.6/channeldb/migration_01_to_11/migrations.go

package migration_01_to_11

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
	lnwire "github.com/decred/dcrlnd/channeldb/migration/lnwire21"
	"github.com/decred/dcrlnd/kvdb"
	"github.com/decred/dcrlnd/lntypes"
)

// MigrateNodeAndEdgeUpdateIndex is a migration function that will update the
// database from version 0 to version 1. In version 1, we add two new indexes
// (one for nodes and one for edges) to keep track of the last time a node or
// edge was updated on the network. These new indexes allow us to implement
// the new graph sync protocol.
func MigrateNodeAndEdgeUpdateIndex(tx kvdb.RwTx) error {
	// First, we'll populate the node portion of the new index. Before we
	// can add new values to the index, we'll first create the new bucket
	// where these items will be housed.
	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return fmt.Errorf("unable to create node bucket: %v", err)
	}
	nodeUpdateIndex, err := nodes.CreateBucketIfNotExists(
		nodeUpdateIndexBucket,
	)
	if err != nil {
		return fmt.Errorf("unable to create node update index: %v", err)
	}

	log.Infof("Populating new node update index bucket")

	// Now that we know the bucket has been created, we'll iterate over the
	// entire node bucket so we can add the (updateTime || nodePub) key
	// into the node update index.
	err = nodes.ForEach(func(nodePub, nodeInfo []byte) error {
		if len(nodePub) != 33 {
			return nil
		}

		log.Tracef("Adding %x to node update index", nodePub)

		// The first 8 bytes of a node's serialized data are the
		// update time, so we can extract that without decoding the
		// entire structure.
		updateTime := nodeInfo[:8]

		// Now that we have the update time, we can construct the key
		// to insert into the index.
		var indexKey [8 + 33]byte
		copy(indexKey[:8], updateTime)
		copy(indexKey[8:], nodePub)

		return nodeUpdateIndex.Put(indexKey[:], nil)
	})
	if err != nil {
		return fmt.Errorf("unable to update node indexes: %v", err)
	}

	log.Infof("Populating new edge update index bucket")

	// With the set of nodes updated, we'll now update all edges to have a
	// corresponding entry in the edge update index.
	edges, err := tx.CreateTopLevelBucket(edgeBucket)
	if err != nil {
		return fmt.Errorf("unable to create edge bucket: %v", err)
	}
	edgeUpdateIndex, err := edges.CreateBucketIfNotExists(
		edgeUpdateIndexBucket,
	)
	if err != nil {
		return fmt.Errorf("unable to create edge update index: %v", err)
	}

	// We'll now run through each edge policy in the database, and update
	// the index to ensure each edge has the proper record.
	err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) error {
		if len(edgeKey) != 41 {
			return nil
		}

		// Now that we know this is the proper record, we'll grab the
		// channel ID (last 8 bytes of the key), and then decode the
		// edge policy so we can access the update time.
		chanID := edgeKey[33:]
		edgePolicyReader := bytes.NewReader(edgePolicyBytes)

		edgePolicy, err := deserializeChanEdgePolicy(
			edgePolicyReader, nodes,
		)
		if err != nil {
			return err
		}

		log.Tracef("Adding chan_id=%v to edge update index",
			edgePolicy.ChannelID)

		// We'll now construct the index key using the channel ID, and
		// the last time it was updated: (updateTime || chanID).
		var indexKey [8 + 8]byte
		byteOrder.PutUint64(
			indexKey[:], uint64(edgePolicy.LastUpdate.Unix()),
		)
		copy(indexKey[8:], chanID)

		return edgeUpdateIndex.Put(indexKey[:], nil)
	})
	if err != nil {
		return fmt.Errorf("unable to update edge indexes: %v", err)
	}

	log.Infof("Migration to node and edge update indexes complete!")

	return nil
}
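
// parseUpdateIndexKey is a minimal illustrative sketch (not used by the
// migration above) of how entries in the two update index buckets
// decompose: the first 8 bytes hold the update timestamp, and the remainder
// is either a 33-byte compressed node public key or an 8-byte channel ID.
// It assumes byteOrder is big-endian, as the binary.BigEndian usage
// elsewhere in this file suggests.
func parseUpdateIndexKey(indexKey []byte) (updateTime uint64, id []byte) {
	updateTime = binary.BigEndian.Uint64(indexKey[:8])
	id = indexKey[8:]
	return updateTime, id
}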

// MigrateInvoiceTimeSeries is a database migration that assigns all existing
// invoices an index in the add and/or the settle index. Additionally, all
// existing invoices will have their bytes padded out in order to encode the
// add+settle index as well as the amount paid.
func MigrateInvoiceTimeSeries(tx kvdb.RwTx) error {
	invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
	if err != nil {
		return err
	}

	addIndex, err := invoices.CreateBucketIfNotExists(
		addIndexBucket,
	)
	if err != nil {
		return err
	}
	settleIndex, err := invoices.CreateBucketIfNotExists(
		settleIndexBucket,
	)
	if err != nil {
		return err
	}

	log.Infof("Migrating invoice database to new time series format")

	// Now that we have all the buckets we need, we'll run through each
	// invoice in the database, and update it to reflect the new format
	// expected post migration.
	// NOTE: we store the converted invoices and put them back into the
	// database after the loop, since modifying the bucket within the
	// ForEach loop is not safe.
	var invoicesKeys [][]byte
	var invoicesValues [][]byte
	err = invoices.ForEach(func(invoiceNum, invoiceBytes []byte) error {
		// If this is a sub bucket, then we'll skip it.
		if invoiceBytes == nil {
			return nil
		}

		// First, we'll make a copy of the encoded invoice bytes.
		invoiceBytesCopy := make([]byte, len(invoiceBytes))
		copy(invoiceBytesCopy, invoiceBytes)

		// With the bytes copied over, we'll append 24 additional
		// bytes. We do this so we can decode the invoice under the new
		// serialization format.
		padding := bytes.Repeat([]byte{0}, 24)
		invoiceBytesCopy = append(invoiceBytesCopy, padding...)

		invoiceReader := bytes.NewReader(invoiceBytesCopy)
		invoice, err := deserializeInvoiceLegacy(invoiceReader)
		if err != nil {
			return fmt.Errorf("unable to decode invoice: %v", err)
		}

		// Now that we have the fully decoded invoice, we can update
		// the various indexes that we've added, and finally the
		// invoice itself before re-inserting it.

		// First, we'll get the new sequence in the addIndex in order
		// to create the proper mapping.
		nextAddSeqNo, err := addIndex.NextSequence()
		if err != nil {
			return err
		}
		var seqNoBytes [8]byte
		byteOrder.PutUint64(seqNoBytes[:], nextAddSeqNo)
		err = addIndex.Put(seqNoBytes[:], invoiceNum)
		if err != nil {
			return err
		}

		log.Tracef("Adding invoice (preimage=%x, add_index=%v) to add "+
			"time series", invoice.Terms.PaymentPreimage[:],
			nextAddSeqNo)

		// Next, we'll check if the invoice has been settled or not. If
		// so, then we'll also add it to the settle index.
		var nextSettleSeqNo uint64
		if invoice.Terms.State == ContractSettled {
			nextSettleSeqNo, err = settleIndex.NextSequence()
			if err != nil {
				return err
			}

			var seqNoBytes [8]byte
			byteOrder.PutUint64(seqNoBytes[:], nextSettleSeqNo)
			err := settleIndex.Put(seqNoBytes[:], invoiceNum)
			if err != nil {
				return err
			}

			invoice.AmtPaid = invoice.Terms.Value

			log.Tracef("Adding invoice (preimage=%x, "+
				"settle_index=%v) to settle time series",
				invoice.Terms.PaymentPreimage[:],
				nextSettleSeqNo)
		}

		// Finally, we'll update the invoice itself with the new
		// indexing information as well as the amount paid if it has
		// been settled.
		invoice.AddIndex = nextAddSeqNo
		invoice.SettleIndex = nextSettleSeqNo

		// We've fully migrated an invoice, so we'll now update the
		// invoice in-place.
		var b bytes.Buffer
		if err := serializeInvoiceLegacy(&b, &invoice); err != nil {
			return err
		}

		// Save the key and value pending update for after the ForEach
		// is done.
		invoicesKeys = append(invoicesKeys, invoiceNum)
		invoicesValues = append(invoicesValues, b.Bytes())
		return nil
	})
	if err != nil {
		return err
	}

	// Now put the converted invoices into the DB.
	for i := range invoicesKeys {
		key := invoicesKeys[i]
		value := invoicesValues[i]
		if err := invoices.Put(key, value); err != nil {
			return err
		}
	}

	log.Infof("Migration to invoice time series index complete!")

	return nil
}
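
// padLegacyInvoice is a minimal illustrative sketch (not invoked by the
// migration above) of the padding step: 24 zero bytes are appended so the
// new deserializer can read the fields introduced by this migration, per
// the doc comment above these are the add index, the settle index, and the
// amount paid (presumably 8 bytes each). Note that the migration copies the
// original bytes first so the bucket's own slice is never mutated.
func padLegacyInvoice(legacyBytes []byte) []byte {
	// 8 bytes add index || 8 bytes settle index || 8 bytes amount paid.
	padding := bytes.Repeat([]byte{0}, 8+8+8)
	return append(legacyBytes, padding...)
}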

// MigrateInvoiceTimeSeriesOutgoingPayments is a follow-up to the
// MigrateInvoiceTimeSeries migration. As of the time of writing, the
// OutgoingPayment struct embeds an instance of the Invoice struct. As a
// result, we also need to migrate the internal invoice to the new format.
func MigrateInvoiceTimeSeriesOutgoingPayments(tx kvdb.RwTx) error {
	payBucket := tx.ReadWriteBucket(paymentBucket)
	if payBucket == nil {
		return nil
	}

	log.Infof("Migrating invoice database to new outgoing payment format")

	// We store the keys and values we want to modify since it is not safe
	// to modify them directly within the ForEach loop.
	var paymentKeys [][]byte
	var paymentValues [][]byte
	err := payBucket.ForEach(func(payID, paymentBytes []byte) error {
		log.Tracef("Migrating payment %x", payID)

		// The internal invoices for each payment only contain a
		// populated contract term and creation date. As a result,
		// most of the bytes will be "empty".

		// We'll calculate the end of the invoice index assuming a
		// "minimal" invoice that's embedded within the greater
		// OutgoingPayment. The breakdown is:
		//  3 bytes empty var bytes, 16 bytes creation date, 16 bytes
		//  settled date, 32 bytes payment pre-image, 8 bytes value, 1
		//  byte settled.
		endOfInvoiceIndex := 1 + 1 + 1 + 16 + 16 + 32 + 8 + 1

		// We'll now extract the prefix of the pure invoice embedded
		// within.
		invoiceBytes := paymentBytes[:endOfInvoiceIndex]

		// With the prefix extracted, we'll copy over the invoice, and
		// also add padding for the new 24 bytes of fields, and finally
		// append the remainder of the outgoing payment.
		paymentCopy := make([]byte, len(invoiceBytes))
		copy(paymentCopy, invoiceBytes)

		padding := bytes.Repeat([]byte{0}, 24)
		paymentCopy = append(paymentCopy, padding...)
		paymentCopy = append(
			paymentCopy, paymentBytes[endOfInvoiceIndex:]...,
		)

		// At this point we have the new format of the outgoing
		// payment, so we'll attempt to deserialize it to ensure the
		// bytes are properly formatted.
		paymentReader := bytes.NewReader(paymentCopy)
		_, err := deserializeOutgoingPayment(paymentReader)
		if err != nil {
			return fmt.Errorf("unable to deserialize payment: %v", err)
		}

		// Now that we know the modification was successful, we'll
		// store it to our slice of keys and values, and write it back
		// to disk in the new format after the ForEach loop is over.
		paymentKeys = append(paymentKeys, payID)
		paymentValues = append(paymentValues, paymentCopy)
		return nil
	})
	if err != nil {
		return err
	}

	// Finally store the updated payments to the bucket.
	for i := range paymentKeys {
		key := paymentKeys[i]
		value := paymentValues[i]
		if err := payBucket.Put(key, value); err != nil {
			return err
		}
	}

	log.Infof("Migration to outgoing payment invoices complete!")

	return nil
}
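
// spliceInvoicePadding is a minimal illustrative sketch (not used by the
// migration above) of the byte surgery performed there: the serialized
// payment is split at the end of the embedded invoice, 24 zero bytes are
// inserted for the new invoice fields, and the remainder of the payment is
// re-appended.
func spliceInvoicePadding(paymentBytes []byte, endOfInvoiceIndex int) []byte {
	out := make([]byte, 0, len(paymentBytes)+24)
	out = append(out, paymentBytes[:endOfInvoiceIndex]...)
	out = append(out, bytes.Repeat([]byte{0}, 24)...)
	out = append(out, paymentBytes[endOfInvoiceIndex:]...)
	return out
}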

// MigrateEdgePolicies is a migration function that will update the edges
// bucket. It ensures that edges with unknown policies will also have an entry
// in the bucket. After the migration, there will be two edge entries for
// every channel, regardless of whether the policies are known.
func MigrateEdgePolicies(tx kvdb.RwTx) error {
	nodes := tx.ReadWriteBucket(nodeBucket)
	if nodes == nil {
		return nil
	}

	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return nil
	}

	edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
	if edgeIndex == nil {
		return nil
	}

	// checkKey gets the policy from the database with a low-level call
	// so that it is still possible to distinguish between unknown and
	// not present.
	checkKey := func(channelId uint64, keyBytes []byte) error {
		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], channelId)

		_, err := fetchChanEdgePolicy(edges,
			channelID[:], keyBytes, nodes)

		if err == ErrEdgeNotFound {
			log.Tracef("Adding unknown edge policy for node %x, channel %v",
				keyBytes, channelId)

			err := putChanEdgePolicyUnknown(edges, channelId, keyBytes)
			if err != nil {
				return err
			}

			return nil
		}

		return err
	}

	// Iterate over all channels and check both edge policies.
	err := edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
		infoReader := bytes.NewReader(edgeInfoBytes)
		edgeInfo, err := deserializeChanEdgeInfo(infoReader)
		if err != nil {
			return err
		}

		for _, key := range [][]byte{edgeInfo.NodeKey1Bytes[:],
			edgeInfo.NodeKey2Bytes[:]} {

			if err := checkKey(edgeInfo.ChannelID, key); err != nil {
				return err
			}
		}

		return nil
	})

	if err != nil {
		return fmt.Errorf("unable to update edge policies: %v", err)
	}

	log.Infof("Migration of edge policies complete!")

	return nil
}

// PaymentStatusesMigration is a database migration intended to add a payment
// status for each existing payment entity in the bucket, so that we are able
// to control status transitions and prevent cases such as double payment.
func PaymentStatusesMigration(tx kvdb.RwTx) error {
	// Get the bucket dedicated to storing statuses of payments, where the
	// key is the payment hash and the value is the payment status.
	paymentStatuses, err := tx.CreateTopLevelBucket(paymentStatusBucket)
	if err != nil {
		return err
	}

	log.Infof("Migrating database to support payment statuses")

	circuitAddKey := []byte("circuit-adds")
	circuits := tx.ReadWriteBucket(circuitAddKey)
	if circuits != nil {
		log.Infof("Marking all known circuits with status InFlight")

		err = circuits.ForEach(func(k, v []byte) error {
			// Parse the first 8 bytes as the short chan ID for
			// the circuit. We'll skip all circuits whose short
			// chan IDs are not locally initiated, which includes
			// all circuits with non-zero short chan IDs.
			chanID := binary.BigEndian.Uint64(k[:8])
			if chanID != 0 {
				return nil
			}

			// The payment hash is the third item in the serialized
			// payment circuit. The first two items are an AddRef
			// (10 bytes) and the incoming circuit key (16 bytes).
			const payHashOffset = 10 + 16

			paymentHash := v[payHashOffset : payHashOffset+32]

			return paymentStatuses.Put(
				paymentHash, StatusInFlight.Bytes(),
			)
		})
		if err != nil {
			return err
		}
	}

	log.Infof("Marking all existing payments with status Completed")

	// Get the bucket dedicated to storing payments
	bucket := tx.ReadWriteBucket(paymentBucket)
	if bucket == nil {
		return nil
	}

	// For each payment in the bucket, deserialize the payment and mark it
	// as completed.
	err = bucket.ForEach(func(k, v []byte) error {
		// Ignore the entry if it is a sub-bucket.
		if v == nil {
			return nil
		}

		r := bytes.NewReader(v)
		payment, err := deserializeOutgoingPayment(r)
		if err != nil {
			return err
		}

		// Calculate payment hash for current payment.
		paymentHash := sha256.Sum256(payment.PaymentPreimage[:])

		// Update status for current payment to completed. If it fails,
		// the migration is aborted and the payment bucket is returned
		// to its previous state.
		return paymentStatuses.Put(paymentHash[:], StatusSucceeded.Bytes())
	})
	if err != nil {
		return err
	}

	log.Infof("Migration of payment statuses complete!")

	return nil
}
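
// circuitPaymentHash is a minimal illustrative sketch (not used by the
// migration above) of how the payment hash is located within a serialized
// payment circuit: it follows the 10-byte AddRef and the 16-byte incoming
// circuit key, and spans 32 bytes, matching the offsets used above.
func circuitPaymentHash(circuitBytes []byte) []byte {
	const payHashOffset = 10 + 16
	return circuitBytes[payHashOffset : payHashOffset+32]
}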

// MigratePruneEdgeUpdateIndex is a database migration that attempts to resolve
// some lingering bugs with regards to edge policies and their update index.
// Stale entries within the edge update index were not being properly pruned due
// to a miscalculation of the offset of an edge policy's last update. This
// migration also fixes the case where the public keys within edge policies were
// being serialized with an extra byte, causing an even greater error when
// attempting to perform the offset calculation described earlier.
func MigratePruneEdgeUpdateIndex(tx kvdb.RwTx) error {
	// To begin the migration, we'll retrieve the update index bucket. If it
	// does not exist, we have nothing left to do so we can simply exit.
	edges := tx.ReadWriteBucket(edgeBucket)
	if edges == nil {
		return nil
	}
	edgeUpdateIndex := edges.NestedReadWriteBucket(edgeUpdateIndexBucket)
	if edgeUpdateIndex == nil {
		return nil
	}

	// Retrieve some buckets that will be needed later on. These should
	// already exist given the assumption that the buckets above do as
	// well.
	edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
	if err != nil {
		return fmt.Errorf("error creating edge index bucket: %s", err)
	}
	if edgeIndex == nil {
		return fmt.Errorf("unable to create/fetch edge index " +
			"bucket")
	}
	nodes, err := tx.CreateTopLevelBucket(nodeBucket)
	if err != nil {
		return fmt.Errorf("unable to make node bucket")
	}

	log.Info("Migrating database to properly prune edge update index")

	// We'll need to properly prune all the outdated entries within the edge
	// update index. To do so, we'll gather all of the existing policies
	// within the graph to re-populate them later on.
	var edgeKeys [][]byte
	err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) error {
		// All valid entries are indexed by a public key (33 bytes)
		// followed by a channel ID (8 bytes), so we'll skip any entries
		// with keys that do not match this.
		if len(edgeKey) != 33+8 {
			return nil
		}

		edgeKeys = append(edgeKeys, edgeKey)

		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to gather existing edge policies: %v",
			err)
	}

	log.Info("Constructing set of edge update entries to purge.")

	// Build the set of keys that we will remove from the edge update index.
	// This will include all keys contained within the bucket.
	var updateKeysToRemove [][]byte
	err = edgeUpdateIndex.ForEach(func(updKey, _ []byte) error {
		updateKeysToRemove = append(updateKeysToRemove, updKey)
		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to gather existing edge updates: %v",
			err)
	}

	log.Infof("Removing %d entries from edge update index.",
		len(updateKeysToRemove))

	// With the set of keys contained in the edge update index constructed,
	// we'll proceed in purging all of them from the index.
	for _, updKey := range updateKeysToRemove {
		if err := edgeUpdateIndex.Delete(updKey); err != nil {
			return err
		}
	}

	log.Infof("Repopulating edge update index with %d valid entries.",
		len(edgeKeys))

	// For each edge key, we'll retrieve the policy, deserialize it, and
	// re-add it to the different buckets. By doing so, we'll ensure that
	// all existing edge policies are serialized correctly within their
	// respective buckets and that the correct entries are populated within
	// the edge update index.
	for _, edgeKey := range edgeKeys {
		edgePolicyBytes := edges.Get(edgeKey)

		// Skip any entries with unknown policies as there will not be
		// any entries for them in the edge update index.
		if bytes.Equal(edgePolicyBytes, unknownPolicy) {
			continue
		}

		edgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgePolicyBytes), nodes,
		)
		if err != nil {
			return err
		}

		_, err = updateEdgePolicy(tx, edgePolicy)
		if err != nil {
			return err
		}
	}

	log.Info("Migration to properly prune edge update index complete!")

	return nil
}
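
// edgePolicyKey is a minimal illustrative sketch (not used by the migration
// above) of the 41-byte edges bucket key format filtered on above: the
// 33-byte compressed public key of one channel endpoint followed by the
// 8-byte big-endian channel ID.
func edgePolicyKey(nodePub [33]byte, chanID uint64) [33 + 8]byte {
	var key [33 + 8]byte
	copy(key[:33], nodePub[:])
	byteOrder.PutUint64(key[33:], chanID)
	return key
}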

// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of
// ChannelCloseSummary to a format where optional fields' presence is indicated
// with boolean markers.
func MigrateOptionalChannelCloseSummaryFields(tx kvdb.RwTx) error {
	closedChanBucket := tx.ReadWriteBucket(closedChannelBucket)
	if closedChanBucket == nil {
		return nil
	}

	log.Info("Migrating to new closed channel format...")

	// We store the converted keys and values and put them back into the
	// database after the loop, since modifying the bucket within the
	// ForEach loop is not safe.
	var closedChansKeys [][]byte
	var closedChansValues [][]byte
	err := closedChanBucket.ForEach(func(chanID, summary []byte) error {
		r := bytes.NewReader(summary)

		// Read the old (v6) format from the database.
		c, err := deserializeCloseChannelSummaryV6(r)
		if err != nil {
			return err
		}

		// Serialize the summary using the new format.
		var b bytes.Buffer
		if err := serializeChannelCloseSummary(&b, c); err != nil {
			return err
		}

		// Now that we know the conversion was successful, we'll
		// store the key and value to our slices, and write them back
		// to disk in the new format after the ForEach loop is over.
		closedChansKeys = append(closedChansKeys, chanID)
		closedChansValues = append(closedChansValues, b.Bytes())
		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to update closed channels: %v", err)
	}

	// Now put the new format back into the DB.
	for i := range closedChansKeys {
		key := closedChansKeys[i]
		value := closedChansValues[i]
		if err := closedChanBucket.Put(key, value); err != nil {
			return err
		}
	}

	log.Info("Migration to new closed channel format complete!")

	return nil
}
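
// writeOptionalBytes is a minimal sketch of the boolean-marker scheme this
// migration converts to: every optional field is preceded by a one-byte
// presence flag so a reader can distinguish an absent field from a
// zero-valued one. This helper is hypothetical and only illustrates the
// idea; the actual encoding lives in serializeChannelCloseSummary.
func writeOptionalBytes(w *bytes.Buffer, field []byte) error {
	// Write the one-byte presence marker before the optional field.
	marker := byte(0)
	if field != nil {
		marker = 1
	}
	if err := w.WriteByte(marker); err != nil {
		return err
	}
	if field == nil {
		return nil
	}
	_, err := w.Write(field)
	return err
}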

var messageStoreBucket = []byte("message-store")

// MigrateGossipMessageStoreKeys migrates the key format for gossip messages
// found in the message store to a new one that takes into consideration the
// type of the message being stored.
func MigrateGossipMessageStoreKeys(tx kvdb.RwTx) error {
	// We'll start by retrieving the bucket within which these messages
	// are stored. If there isn't one, there's nothing left for us to do,
	// so we can avoid the migration.
	messageStore := tx.ReadWriteBucket(messageStoreBucket)
	if messageStore == nil {
		return nil
	}

	log.Info("Migrating to the gossip message store new key format")

	// Otherwise we'll proceed with the migration. We'll start by coalescing
	// all the current messages within the store, which are indexed by the
	// public key of the peer which they should be sent to, followed by the
	// short channel ID of the channel to which the message belongs. We
	// should only expect to find channel announcement signatures, as that
	// was the only supported message type previously.
	msgs := make(map[[33 + 8]byte]*lnwire.AnnounceSignatures)
	err := messageStore.ForEach(func(k, v []byte) error {
		var msgKey [33 + 8]byte
		copy(msgKey[:], k)

		msg := &lnwire.AnnounceSignatures{}
		if err := msg.Decode(bytes.NewReader(v), 0); err != nil {
			return err
		}

		msgs[msgKey] = msg

		return nil
	})
	if err != nil {
		return err
	}

	// Then, we'll go over all of our messages, remove their previous entry,
	// and add another with the new key format. Once we've done this for
	// every message, we can consider the migration complete.
	for oldMsgKey, msg := range msgs {
		if err := messageStore.Delete(oldMsgKey[:]); err != nil {
			return err
		}

		// Construct the new key under which we'll find this message
		// in the store. It'll be the same as the old, but we'll also
		// include the message type.
		var msgType [2]byte
		binary.BigEndian.PutUint16(msgType[:], uint16(msg.MsgType()))
		newMsgKey := append(oldMsgKey[:], msgType[:]...)

		// Serialize the message with its wire encoding.
		var b bytes.Buffer
		if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
			return err
		}

		if err := messageStore.Put(newMsgKey, b.Bytes()); err != nil {
			return err
		}
	}

	log.Info("Migration to the gossip message store new key format complete!")

	return nil
}
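
// newGossipMsgKey is a minimal illustrative sketch (not used by the
// migration above) of the new message store key format: the original
// 41-byte key, i.e. the peer's public key followed by the short channel ID,
// with the 2-byte big-endian message type appended.
func newGossipMsgKey(oldKey [33 + 8]byte, msgType uint16) []byte {
	var typeBytes [2]byte
	binary.BigEndian.PutUint16(typeBytes[:], msgType)
	return append(oldKey[:], typeBytes[:]...)
}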

// MigrateOutgoingPayments moves the OutgoingPayments into a new bucket format
// where they all reside in a top-level bucket indexed by the payment hash. In
// this sub-bucket we store information relevant to this payment, such as the
// payment status.
//
// Since the router cannot handle resumed payments that have the status
// InFlight (we have no PaymentAttemptInfo available for pre-migration
// payments) we delete those statuses, so only Completed payments remain in the
// new bucket structure.
func MigrateOutgoingPayments(tx kvdb.RwTx) error {
	log.Infof("Migrating outgoing payments to new bucket structure")

	oldPayments := tx.ReadWriteBucket(paymentBucket)

	// Return early if there are no payments to migrate.
	if oldPayments == nil {
		log.Infof("No outgoing payments found, nothing to migrate.")
		return nil
	}

	newPayments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
	if err != nil {
		return err
	}

	// Helper method to get the source pubkey. We define it such that we
	// only attempt to fetch it if needed.
	sourcePub := func() ([33]byte, error) {
		var pub [33]byte
		nodes := tx.ReadWriteBucket(nodeBucket)
		if nodes == nil {
			return pub, ErrGraphNotFound
		}

		selfPub := nodes.Get(sourceKey)
		if selfPub == nil {
			return pub, ErrSourceNodeNotSet
		}
		copy(pub[:], selfPub[:])
		return pub, nil
	}

	err = oldPayments.ForEach(func(k, v []byte) error {
		// Ignore the entry if it is a sub-bucket.
		if v == nil {
			return nil
		}

		// Read the old payment format.
		r := bytes.NewReader(v)
		payment, err := deserializeOutgoingPayment(r)
		if err != nil {
			return err
		}

		// Calculate payment hash from the payment preimage.
		preimage := lntypes.Preimage(payment.PaymentPreimage)
		paymentHash := preimage.Hash()

		// Now create and add a PaymentCreationInfo to the bucket.
		c := &PaymentCreationInfo{
			PaymentHash:    paymentHash,
			Value:          payment.Terms.Value,
			CreationDate:   payment.CreationDate,
			PaymentRequest: payment.PaymentRequest,
		}

		var infoBuf bytes.Buffer
		if err := serializePaymentCreationInfo(&infoBuf, c); err != nil {
			return err
		}

		sourcePubKey, err := sourcePub()
		if err != nil {
			return err
		}

		// Do the same for the PaymentAttemptInfo.
		totalAmt := payment.Terms.Value + payment.Fee
		rt := Route{
			TotalTimeLock: payment.TimeLockLength,
			TotalAmount:   totalAmt,
			SourcePubKey:  sourcePubKey,
			Hops:          []*Hop{},
		}
		for _, hop := range payment.Path {
			rt.Hops = append(rt.Hops, &Hop{
				PubKeyBytes:  hop,
				AmtToForward: totalAmt,
			})
		}

		// Since the old format didn't store the fee for individual
		// hops, we let the last hop eat the whole fee so that the
		// total adds up.
		if len(rt.Hops) > 0 {
			rt.Hops[len(rt.Hops)-1].AmtToForward = payment.Terms.Value
		}

		// Since we don't have the session key for old payments, we
		// create a random one to be able to serialize the attempt
		// info.
		priv, _ := secp256k1.GeneratePrivateKey()
		s := &PaymentAttemptInfo{
			PaymentID:  0,    // unknown.
			SessionKey: priv, // unknown.
			Route:      rt,
		}

		var attemptBuf bytes.Buffer
		if err := serializePaymentAttemptInfoMigration9(&attemptBuf, s); err != nil {
			return err
		}

		// Reuse the existing payment sequence number.
		var seqNum [8]byte
		copy(seqNum[:], k)

		// Create a bucket indexed by the payment hash.
		bucket, err := newPayments.CreateBucket(paymentHash[:])

		// If the bucket already exists, it means that we are migrating
		// from a database containing duplicate payments to a payment
		// hash. To keep this information, we store such duplicate
		// payments in a sub-bucket.
		if err == kvdb.ErrBucketExists {
			pHashBucket := newPayments.NestedReadWriteBucket(paymentHash[:])

			// Create a bucket for duplicate payments within this
			// payment hash's bucket.
			dup, err := pHashBucket.CreateBucketIfNotExists(
				paymentDuplicateBucket,
			)
			if err != nil {
				return err
			}

			// Each duplicate will get its own sub-bucket within
			// this bucket, indexed by its sequence number.
			bucket, err = dup.CreateBucket(seqNum[:])
			if err != nil {
				return err
			}

		} else if err != nil {
			return err
		}

		// Store the payment's information to the bucket.
		err = bucket.Put(paymentSequenceKey, seqNum[:])
		if err != nil {
			return err
		}

		err = bucket.Put(paymentCreationInfoKey, infoBuf.Bytes())
		if err != nil {
			return err
		}

		err = bucket.Put(paymentAttemptInfoKey, attemptBuf.Bytes())
		if err != nil {
			return err
		}

		err = bucket.Put(paymentSettleInfoKey, payment.PaymentPreimage[:])
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}

	// To continue producing unique sequence numbers, we set the sequence
	// of the new bucket to that of the old one.
	seq := oldPayments.Sequence()
	if err := newPayments.SetSequence(seq); err != nil {
		return err
	}

	// Now we delete the old buckets. Deleting the payment status bucket
	// drops all payment statuses, and only Completed payments have been
	// carried over into the new structure.
	err = tx.DeleteTopLevelBucket(paymentStatusBucket)
	if err != nil && err != kvdb.ErrBucketNotFound {
		return err
	}

	// Finally delete the old payment bucket.
	err = tx.DeleteTopLevelBucket(paymentBucket)
	if err != nil && err != kvdb.ErrBucketNotFound {
		return err
	}

	log.Infof("Migration of outgoing payment bucket structure completed!")
	return nil
}
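
// The bucket layout produced by MigrateOutgoingPayments, as sketched from
// the keys used above:
//
//	paymentsRootBucket
//	└── <payment hash>
//	    ├── paymentSequenceKey     => sequence number
//	    ├── paymentCreationInfoKey => serialized PaymentCreationInfo
//	    ├── paymentAttemptInfoKey  => serialized PaymentAttemptInfo
//	    ├── paymentSettleInfoKey   => payment preimage
//	    └── paymentDuplicateBucket (only when a payment hash was reused)
//	        └── <sequence number>  => the same four keys as above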