github.com/decred/dcrlnd@v0.7.6/channeldb/migration21/migration.go

package migration21

import (
	"bytes"
	"encoding/binary"
	"fmt"

	lnwire "github.com/decred/dcrlnd/channeldb/migration/lnwire21"
	"github.com/decred/dcrlnd/channeldb/migration21/common"
	"github.com/decred/dcrlnd/channeldb/migration21/current"
	"github.com/decred/dcrlnd/channeldb/migration21/legacy"
	"github.com/decred/dcrlnd/kvdb"
)

var (
	byteOrder = binary.BigEndian

	// openChannelBucket stores all the currently open channels. This bucket
	// has a second, nested bucket which is keyed by a node's ID. Within
	// that node ID bucket, all attributes required to track, update, and
	// close a channel are stored.
	//
	// openChan -> nodeID -> chainHash -> chanPoint
	//
	// TODO(roasbeef): flesh out comment
	openChannelBucket = []byte("open-chan-bucket")

	// commitDiffKey stores the current pending commitment state we've
	// extended to the remote party (if any). Each time we propose a new
	// state, we store the information necessary to reconstruct this state
	// from the prior commitment. This allows us to resync the remote party
	// to their expected state in the case of message loss.
	//
	// TODO(roasbeef): rename to commit chain?
	commitDiffKey = []byte("commit-diff-key")

	// unsignedAckedUpdatesKey is an entry in the channel bucket that
	// contains the remote updates that we have acked, but not yet signed
	// for in one of our remote commits.
	unsignedAckedUpdatesKey = []byte("unsigned-acked-updates-key")

	// remoteUnsignedLocalUpdatesKey is an entry in the channel bucket that
	// contains the local updates that the remote party has acked, but
	// has not yet signed for in one of their local commits.
	remoteUnsignedLocalUpdatesKey = []byte("remote-unsigned-local-updates-key")

	// networkResultStoreBucketKey is used for the root level bucket that
	// stores the network result for each payment ID.
	networkResultStoreBucketKey = []byte("network-result-store-bucket")

	// closedChannelBucket stores summarization information concerning
	// previously open, but now closed channels.
	closedChannelBucket = []byte("closed-chan-bucket")

	// fwdPackagesKey is the root-level bucket that all forwarding packages
	// are written to. This bucket is further subdivided based on the short
	// channel ID of each channel.
	fwdPackagesKey = []byte("fwd-packages")
)
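
// fetchChanBucket is an illustrative sketch only; it is not part of the
// original migration and the helper name is hypothetical. It shows how the
// nested layout described above (openChan -> nodeID -> chainHash ->
// chanPoint) resolves to a single channel bucket, using the same
// NestedReadWriteBucket calls that migrateOpenChanBucket relies on below.
func fetchChanBucket(tx kvdb.RwTx, nodePub, chainHash,
	chanPoint []byte) (kvdb.RwBucket, error) {

	// The top-level bucket keyed by "open-chan-bucket".
	openChanBucket := tx.ReadWriteBucket(openChannelBucket)
	if openChanBucket == nil {
		return nil, fmt.Errorf("no open channel bucket")
	}

	// Next level: one bucket per remote node public key.
	nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
	if nodeChanBucket == nil {
		return nil, fmt.Errorf("no bucket for node %x", nodePub)
	}

	// Next level: one bucket per chain the node has channels on with us.
	chainBucket := nodeChanBucket.NestedReadWriteBucket(chainHash)
	if chainBucket == nil {
		return nil, fmt.Errorf("no bucket for chain %x", chainHash)
	}

	// Final level: the per-channel bucket, keyed by the channel point.
	chanBucket := chainBucket.NestedReadWriteBucket(chanPoint)
	if chanBucket == nil {
		return nil, fmt.Errorf("no bucket for channel %x", chanPoint)
	}

	return chanBucket, nil
}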

// MigrateDatabaseWireMessages performs a migration in all areas that we
// currently store wire messages without length prefixes. This includes the
// CommitDiff struct, ChannelCloseSummary, LogUpdates, and the networkResult
// struct.
func MigrateDatabaseWireMessages(tx kvdb.RwTx) error {
	// The migration will proceed in four phases: we'll migrate the open
	// channel data (pending commit diffs and any unsigned acked updates),
	// then the closed channel summaries, then the forwarding packages,
	// and finally the stored network results for payments in the switch.
	//
	// In this phase, we'll migrate the open channel data.
	if err := migrateOpenChanBucket(tx); err != nil {
		return err
	}

	// Next, we'll update all the present close channel summaries as well.
	if err := migrateCloseChanSummaries(tx); err != nil {
		return err
	}

	// We'll migrate forwarding packages, which have log updates as part of
	// their serialized data.
	if err := migrateForwardingPackages(tx); err != nil {
		return err
	}

	// Finally, we'll update the pending network results as well.
	return migrateNetworkResults(tx)
}
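
// The "current" encoding used by this migration differs from the "legacy"
// one by writing a length prefix in front of each embedded wire message, so
// a reader always knows exactly how many bytes the message occupies. The
// sketch below is illustrative only and is not the codec used by the
// current/legacy sub-packages; in particular, the 16-bit big-endian prefix
// here is an assumption made purely for the example.
func exampleWriteLengthPrefixed(w *bytes.Buffer, msgBytes []byte) {
	// Write the message length first, then the raw message bytes.
	var lenBuf [2]byte
	byteOrder.PutUint16(lenBuf[:], uint16(len(msgBytes)))
	w.Write(lenBuf[:])
	w.Write(msgBytes)
}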

func migrateOpenChanBucket(tx kvdb.RwTx) error {
	openChanBucket := tx.ReadWriteBucket(openChannelBucket)

	// If no bucket is found, we can exit early.
	if openChanBucket == nil {
		return nil
	}

	type channelPath struct {
		nodePub   []byte
		chainHash []byte
		chanPoint []byte
	}
	var channelPaths []channelPath
	err := openChanBucket.ForEach(func(nodePub, v []byte) error {
		// Ensure that this is a key the same size as a pubkey, and
		// also that it leads directly to a bucket.
		if len(nodePub) != 33 || v != nil {
			return nil
		}

		nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
		if nodeChanBucket == nil {
			return fmt.Errorf("no bucket for node %x", nodePub)
		}

		// The next layer down is all the chains that this node
		// has channels on with us.
		return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
			// If there's a value, it's not a bucket so
			// ignore it.
			if v != nil {
				return nil
			}

			chainBucket := nodeChanBucket.NestedReadBucket(
				chainHash,
			)
			if chainBucket == nil {
				return fmt.Errorf("unable to read "+
					"bucket for chain=%x", chainHash)
			}

			return chainBucket.ForEach(func(chanPoint, v []byte) error {
				// If there's a value, it's not a bucket so
				// ignore it.
				if v != nil {
					return nil
				}

				channelPaths = append(channelPaths, channelPath{
					nodePub:   nodePub,
					chainHash: chainHash,
					chanPoint: chanPoint,
				})

				return nil
			})
		})
	})
	if err != nil {
		return err
	}

	// Now that we have all the paths of the channels we need to migrate,
	// we'll update all the state in a distinct step to avoid weird
	// behavior from modifying buckets in a ForEach statement.
	for _, channelPath := range channelPaths {
		// First, we'll extract it from the node's chain bucket.
		nodeChanBucket := openChanBucket.NestedReadWriteBucket(
			channelPath.nodePub,
		)
		chainBucket := nodeChanBucket.NestedReadWriteBucket(
			channelPath.chainHash,
		)
		chanBucket := chainBucket.NestedReadWriteBucket(
			channelPath.chanPoint,
		)

		// At this point, we have the channel bucket now, so we'll
		// check to see if this channel has a pending commitment or
		// not.
		commitDiffBytes := chanBucket.Get(commitDiffKey)
		if commitDiffBytes != nil {
			// Now that we have the commit diff in the _old_
			// encoding, we'll write it back to disk using the new
			// encoding which has a length prefix in front of the
			// CommitSig.
			commitDiff, err := legacy.DeserializeCommitDiff(
				bytes.NewReader(commitDiffBytes),
			)
			if err != nil {
				return err
			}

			var b bytes.Buffer
			err = current.SerializeCommitDiff(&b, commitDiff)
			if err != nil {
				return err
			}

			err = chanBucket.Put(commitDiffKey, b.Bytes())
			if err != nil {
				return err
			}
		}

		// With the commit diff migrated, we'll now check to see if
		// there are any unsigned acked updates we need to migrate as
		// well.
		updateBytes := chanBucket.Get(unsignedAckedUpdatesKey)
		if updateBytes != nil {
			// We have unsigned acked updates we need to migrate,
			// so we'll decode and then re-encode them here using
			// the new format.
			legacyUnackedUpdates, err := legacy.DeserializeLogUpdates(
				bytes.NewReader(updateBytes),
			)
			if err != nil {
				return err
			}

			var b bytes.Buffer
			err = current.SerializeLogUpdates(&b, legacyUnackedUpdates)
			if err != nil {
				return err
			}

			err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes())
			if err != nil {
				return err
			}
		}

		// Migrate the remote unsigned local updates as well.
		updateBytes = chanBucket.Get(remoteUnsignedLocalUpdatesKey)
		if updateBytes != nil {
			legacyUnsignedUpdates, err := legacy.DeserializeLogUpdates(
				bytes.NewReader(updateBytes),
			)
			if err != nil {
				return err
			}

			var b bytes.Buffer
			err = current.SerializeLogUpdates(&b, legacyUnsignedUpdates)
			if err != nil {
				return err
			}

			err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b.Bytes())
			if err != nil {
				return err
			}
		}
	}

	return nil
}
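
// reencodeLogUpdates is a hedged convenience sketch rather than a helper the
// migration above actually uses (the loop inlines the same steps). It shows
// the round trip applied to both the unsigned acked updates and the remote
// unsigned local updates: deserialize with the legacy codec, then
// re-serialize with the current, length-prefixed codec.
func reencodeLogUpdates(oldBytes []byte) ([]byte, error) {
	// Decode the updates using the legacy (non-length-prefixed) encoding.
	updates, err := legacy.DeserializeLogUpdates(bytes.NewReader(oldBytes))
	if err != nil {
		return nil, err
	}

	// Re-encode the same updates using the current encoding.
	var b bytes.Buffer
	if err := current.SerializeLogUpdates(&b, updates); err != nil {
		return nil, err
	}

	return b.Bytes(), nil
}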

func migrateCloseChanSummaries(tx kvdb.RwTx) error {
	closedChanBucket := tx.ReadWriteBucket(closedChannelBucket)

	// Exit early if bucket is not found.
	if closedChanBucket == nil {
		return nil
	}

	type closedChan struct {
		chanKey      []byte
		summaryBytes []byte
	}
	var closedChans []closedChan
	err := closedChanBucket.ForEach(func(k, v []byte) error {
		closedChans = append(closedChans, closedChan{
			chanKey:      k,
			summaryBytes: v,
		})
		return nil
	})
	if err != nil {
		return err
	}

	for _, closedChan := range closedChans {
		oldSummary, err := legacy.DeserializeCloseChannelSummary(
			bytes.NewReader(closedChan.summaryBytes),
		)
		if err != nil {
			return err
		}

		var newSummaryBytes bytes.Buffer
		err = current.SerializeChannelCloseSummary(
			&newSummaryBytes, oldSummary,
		)
		if err != nil {
			return err
		}

		err = closedChanBucket.Put(
			closedChan.chanKey, newSummaryBytes.Bytes(),
		)
		if err != nil {
			return err
		}
	}
	return nil
}
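
// collectThenPut is a hedged sketch of the two-pass pattern that
// migrateCloseChanSummaries (and the other helpers in this file) follow: the
// key/value pairs are snapshotted inside ForEach first, and only written
// back afterwards, to avoid modifying a bucket while it is being iterated.
// The helper itself is hypothetical and is not used by the migration.
func collectThenPut(bucket kvdb.RwBucket,
	transform func([]byte) ([]byte, error)) error {

	type kvPair struct {
		key   []byte
		value []byte
	}

	// Phase one: read every key/value pair without modifying the bucket.
	var pairs []kvPair
	err := bucket.ForEach(func(k, v []byte) error {
		pairs = append(pairs, kvPair{key: k, value: v})
		return nil
	})
	if err != nil {
		return err
	}

	// Phase two: transform and write the values back under the same keys.
	for _, pair := range pairs {
		newValue, err := transform(pair.value)
		if err != nil {
			return err
		}
		if err := bucket.Put(pair.key, newValue); err != nil {
			return err
		}
	}

	return nil
}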

func migrateForwardingPackages(tx kvdb.RwTx) error {
	fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)

	// Exit early if bucket is not found.
	if fwdPkgBkt == nil {
		return nil
	}

	// Go through the bucket and fetch all short channel IDs.
	var sources []lnwire.ShortChannelID
	err := fwdPkgBkt.ForEach(func(k, v []byte) error {
		source := lnwire.NewShortChanIDFromInt(byteOrder.Uint64(k))
		sources = append(sources, source)
		return nil
	})
	if err != nil {
		return err
	}

	// Now load all forwarding packages using the legacy encoding.
	var pkgsToMigrate []*common.FwdPkg
	for _, source := range sources {
		packager := legacy.NewChannelPackager(source)
		fwdPkgs, err := packager.LoadFwdPkgs(tx)
		if err != nil {
			return err
		}

		pkgsToMigrate = append(pkgsToMigrate, fwdPkgs...)
	}

	// Add back the packages using the current encoding.
	for _, pkg := range pkgsToMigrate {
		packager := current.NewChannelPackager(pkg.Source)
		err := packager.AddFwdPkg(tx, pkg)
		if err != nil {
			return err
		}
	}

	return nil
}
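
// Forwarding package buckets are keyed by the big-endian uint64 encoding of
// the channel's short channel ID, which is why the loop above recovers each
// source with byteOrder.Uint64. As an illustrative sketch (the helper is
// hypothetical), the reverse mapping looks like this:
func exampleFwdPkgKey(source lnwire.ShortChannelID) []byte {
	var key [8]byte
	byteOrder.PutUint64(key[:], source.ToUint64())
	return key[:]
}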

func migrateNetworkResults(tx kvdb.RwTx) error {
	networkResults := tx.ReadWriteBucket(networkResultStoreBucketKey)

	// Exit early if bucket is not found.
	if networkResults == nil {
		return nil
	}

	// Similar to the prior migrations, we'll do this one in two phases:
	// we'll first grab all the keys we need to migrate in one loop, then
	// update them all in another loop.
	var netResultsToMigrate [][2][]byte
	err := networkResults.ForEach(func(k, v []byte) error {
		netResultsToMigrate = append(netResultsToMigrate, [2][]byte{
			k, v,
		})
		return nil
	})
	if err != nil {
		return err
	}

	for _, netResult := range netResultsToMigrate {
		resKey := netResult[0]
		resBytes := netResult[1]
		oldResult, err := legacy.DeserializeNetworkResult(
			bytes.NewReader(resBytes),
		)
		if err != nil {
			return err
		}

		var newResultBuf bytes.Buffer
		err = current.SerializeNetworkResult(&newResultBuf, oldResult)
		if err != nil {
			return err
		}

		err = networkResults.Put(resKey, newResultBuf.Bytes())
		if err != nil {
			return err
		}
	}
	return nil
}
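
// exampleApplyMigration is a hedged usage sketch and not part of the
// original file. MigrateDatabaseWireMessages is written against a single
// kvdb.RwTx, so a caller that already holds a read-write transaction, such
// as channeldb's migration framework, can apply the whole migration in one
// pass over the four areas handled above.
func exampleApplyMigration(tx kvdb.RwTx) error {
	return MigrateDatabaseWireMessages(tx)
}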