github.com/decred/dcrlnd@v0.7.6/channeldb/db.go

     1  package channeldb
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/binary"
     6  	"fmt"
     7  	"io/ioutil"
     8  	"net"
     9  	"os"
    10  
    11  	"github.com/decred/dcrd/dcrec/secp256k1/v4"
    12  	"github.com/decred/dcrd/wire"
    13  	mig "github.com/decred/dcrlnd/channeldb/migration"
    14  	"github.com/decred/dcrlnd/channeldb/migration12"
    15  	"github.com/decred/dcrlnd/channeldb/migration13"
    16  	"github.com/decred/dcrlnd/channeldb/migration16"
    17  	"github.com/decred/dcrlnd/channeldb/migration20"
    18  	"github.com/decred/dcrlnd/channeldb/migration21"
    19  	"github.com/decred/dcrlnd/channeldb/migration23"
    20  	"github.com/decred/dcrlnd/channeldb/migration24"
    21  	"github.com/decred/dcrlnd/channeldb/migration_01_to_11"
    22  	"github.com/decred/dcrlnd/clock"
    23  	"github.com/decred/dcrlnd/kvdb"
    24  	"github.com/decred/dcrlnd/lnwire"
    25  	"github.com/decred/dcrlnd/routing/route"
    26  	"github.com/go-errors/errors"
    27  )
    28  
    29  const (
    30  	dbName = "channel.db"
    31  )
    32  
    33  var (
    34  	// ErrDryRunMigrationOK signals that a migration executed successfully,
    35  	// but we intentionally did not commit the result.
    36  	ErrDryRunMigrationOK = errors.New("dry run migration successful")
    37  )
    38  
    39  // migration is a function which takes a prior outdated version of the database
    40  // instance and mutates the key/bucket structure to arrive at a more
    41  // up-to-date version of the database.
    42  type migration func(tx kvdb.RwTx) error
    43  
    44  type version struct {
    45  	number    uint32
    46  	migration migration
    47  }
    48  
    49  var (
    50  	// dbVersions stores all versions of the database. If the current
    51  	// database version doesn't match the latest version, this list is
    52  	// used to retrieve all migration functions that need to be applied
    53  	// to the current db.
    54  	dbVersions = []version{
    55  		{
    56  			// The base DB version requires no migration.
    57  			number:    0,
    58  			migration: nil,
    59  		},
    60  		{
    61  			// The version of the database where two new indexes
    62  			// for the update time of node and channel updates were
    63  			// added.
    64  			number:    1,
    65  			migration: migration_01_to_11.MigrateNodeAndEdgeUpdateIndex,
    66  		},
    67  		{
    68  			// The DB version that added the invoice event time
    69  			// series.
    70  			number:    2,
    71  			migration: migration_01_to_11.MigrateInvoiceTimeSeries,
    72  		},
    73  		{
    74  			// The DB version that updated the embedded invoice in
    75  			// outgoing payments to match the new format.
    76  			number:    3,
    77  			migration: migration_01_to_11.MigrateInvoiceTimeSeriesOutgoingPayments,
    78  		},
    79  		{
    80  			// The version of the database where every channel
    81  			// always has two entries in the edges bucket. If
    82  			// a policy is unknown, this will be represented
    83  			// by a special byte sequence.
    84  			number:    4,
    85  			migration: migration_01_to_11.MigrateEdgePolicies,
    86  		},
    87  		{
    88  			// The DB version where we persist each attempt to send
    89  			// an HTLC to a payment hash, and track whether the
    90  			// payment is in-flight, succeeded, or failed.
    91  			number:    5,
    92  			migration: migration_01_to_11.PaymentStatusesMigration,
    93  		},
    94  		{
    95  			// The DB version that properly prunes stale entries
    96  			// from the edge update index.
    97  			number:    6,
    98  			migration: migration_01_to_11.MigratePruneEdgeUpdateIndex,
    99  		},
   100  		{
   101  			// The DB version that migrates the ChannelCloseSummary
   102  			// to a format where optional fields are indicated with
   103  			// boolean flags.
   104  			number:    7,
   105  			migration: migration_01_to_11.MigrateOptionalChannelCloseSummaryFields,
   106  		},
   107  		{
   108  			// The DB version that changes the gossiper's message
   109  			// store keys to account for the message's type and
   110  			// ShortChannelID.
   111  			number:    8,
   112  			migration: migration_01_to_11.MigrateGossipMessageStoreKeys,
   113  		},
   114  		{
   115  			// The DB version where the payments and payment
   116  			// statuses are moved to being stored in a combined
   117  			// bucket.
   118  			number:    9,
   119  			migration: migration_01_to_11.MigrateOutgoingPayments,
   120  		},
   121  		{
   122  			// The DB version where we started to store legacy
   123  			// payload information for all routes, as well as the
   124  			// optional TLV records.
   125  			number:    10,
   126  			migration: migration_01_to_11.MigrateRouteSerialization,
   127  		},
   128  		{
   129  			// Add invoice htlc and cltv delta fields.
   130  			number:    11,
   131  			migration: migration_01_to_11.MigrateInvoices,
   132  		},
   133  		{
   134  			// Migrate to TLV invoice bodies, add payment address
   135  			// and features, remove receipt.
   136  			number:    12,
   137  			migration: migration12.MigrateInvoiceTLV,
   138  		},
   139  		{
   140  			// Migrate to multi-path payments.
   141  			number:    13,
   142  			migration: migration13.MigrateMPP,
   143  		},
   144  		{
   145  			// Initialize payment address index and begin using it
   146  			// as the default index, falling back to payment hash
   147  			// index.
   148  			number:    14,
   149  			migration: mig.CreateTLB(payAddrIndexBucket),
   150  		},
   151  		{
   152  			// Initialize payment index bucket which will be used
   153  			// to index payments by sequence number. This index will
   154  			// be used to allow more efficient ListPayments queries.
   155  			number:    15,
   156  			migration: mig.CreateTLB(paymentsIndexBucket),
   157  		},
   158  		{
   159  			// Add our existing payments to the index bucket created
   160  			// in migration 15.
   161  			number:    16,
   162  			migration: migration16.MigrateSequenceIndex,
   163  		},
   164  		{
   165  			// Create a top level bucket which will store extra
   166  			// information about channel closes.
   167  			number:    17,
   168  			migration: mig.CreateTLB(closeSummaryBucket),
   169  		},
   170  		{
   171  			// Create a top level bucket which holds information
   172  			// about our peers.
   173  			number:    18,
   174  			migration: mig.CreateTLB(peersBucket),
   175  		},
   176  		{
   177  			// Create a top level bucket which holds outpoint
   178  			// information.
   179  			number:    19,
   180  			migration: mig.CreateTLB(outpointBucket),
   181  		},
   182  		{
   183  			// Migrate some data to the outpoint index.
   184  			number:    20,
   185  			migration: migration20.MigrateOutpointIndex,
   186  		},
   187  		{
   188  			// Migrate to length prefixed wire messages everywhere
   189  			// in the database.
   190  			number:    21,
   191  			migration: migration21.MigrateDatabaseWireMessages,
   192  		},
   193  		{
   194  			// Initialize set id index so that invoices can be
   195  			// queried by individual htlc sets.
   196  			number:    22,
   197  			migration: mig.CreateTLB(setIDIndexBucket),
   198  		},
   199  		{
   200  			number:    23,
   201  			migration: migration23.MigrateHtlcAttempts,
   202  		},
   203  		{
   204  			// Remove old forwarding packages of closed channels.
   205  			number:    24,
   206  			migration: migration24.MigrateFwdPkgCleanup,
   207  		},
   208  		// Note: There are decred-only changes in the codec which may
   209  		// require porting when bringing upstream migrations from lnd.
   210  		// In particular:
   211  		// - {read,write}Outpoint include the tree.
   212  	}
   213  
   214  	// Big endian is the preferred byte order, due to cursor scans over
   215  	// integer keys iterating in order.
   216  	byteOrder = binary.BigEndian
   217  
   218  	// channelOpeningStateBucket is the database bucket used to store the
   219  	// channelOpeningState for each channel that is currently in the process
   220  	// of being opened.
   221  	channelOpeningStateBucket = []byte("channelOpeningState")
   222  )
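        // Illustrative sketch only (not part of the upstream code): a helper
        // satisfying the migration type above by ensuring a top-level bucket
        // exists, which is essentially what mig.CreateTLB supplies for several
        // of the versions listed in dbVersions.
        func exampleCreateBucketMigration(bucketKey []byte) migration {
        	return func(tx kvdb.RwTx) error {
        		// CreateTopLevelBucket returns the existing bucket if it
        		// was already created, so this migration is idempotent
        		// (see mig.CreateTLB for the real helper).
        		_, err := tx.CreateTopLevelBucket(bucketKey)
        		return err
        	}
        }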
   223  
   224  // DB is the primary datastore for the lnd daemon. The database stores
   225  // information related to nodes, routing data, open/closed channels, fee
   226  // schedules, and reputation data.
   227  type DB struct {
   228  	kvdb.Backend
   229  
   230  	// channelStateDB separates all DB operations on channel state.
   231  	channelStateDB *ChannelStateDB
   232  
   233  	dbPath string
   234  	graph  *ChannelGraph
   235  	clock  clock.Clock
   236  	dryRun bool
   237  }
   238  
   239  // Open opens or creates channeldb. Any schema migrations required by
   240  // updates will be applied automatically.
   241  // TODO(bhandras): deprecate this function.
   242  func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
   243  	opts := DefaultOptions()
   244  	for _, modifier := range modifiers {
   245  		modifier(&opts)
   246  	}
   247  
   248  	backend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{
   249  		DBPath:            dbPath,
   250  		DBFileName:        dbName,
   251  		NoFreelistSync:    opts.NoFreelistSync,
   252  		AutoCompact:       opts.AutoCompact,
   253  		AutoCompactMinAge: opts.AutoCompactMinAge,
   254  		DBTimeout:         opts.DBTimeout,
   255  	})
   256  	if err != nil {
   257  		return nil, err
   258  	}
   259  
   260  	db, err := CreateWithBackend(backend, modifiers...)
   261  	if err == nil {
   262  		db.dbPath = dbPath
   263  	}
   264  	return db, err
   265  }
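        // Illustrative usage sketch (hypothetical caller, written as if it
        // lived in this package): open the database in a directory and make
        // sure the backing store is closed when done. Any OptionModifier
        // values would simply be passed as extra arguments to Open.
        func exampleOpenUsage(dbDir string) error {
        	db, err := Open(dbDir)
        	if err != nil {
        		return err
        	}
        	defer db.Close()

        	// The channel state sub-database is accessed via ChannelStateDB.
        	_ = db.ChannelStateDB()

        	return nil
        }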
   266  
   267  // CreateWithBackend creates a channeldb instance using the passed kvdb.Backend.
   268  // Any schema migrations required by updates will be applied automatically.
   269  func CreateWithBackend(backend kvdb.Backend, modifiers ...OptionModifier) (*DB, error) {
   270  	if err := initChannelDB(backend); err != nil {
   271  		return nil, err
   272  	}
   273  
   274  	opts := DefaultOptions()
   275  	for _, modifier := range modifiers {
   276  		modifier(&opts)
   277  	}
   278  
   279  	chanDB := &DB{
   280  		Backend: backend,
   281  		channelStateDB: &ChannelStateDB{
   282  			linkNodeDB: &LinkNodeDB{
   283  				backend: backend,
   284  			},
   285  			backend: backend,
   286  		},
   287  		clock:  opts.clock,
   288  		dryRun: opts.dryRun,
   289  	}
   290  
   291  	// Set the parent pointer (only used in tests).
   292  	chanDB.channelStateDB.parent = chanDB
   293  
   294  	var err error
   295  	chanDB.graph, err = NewChannelGraph(
   296  		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
   297  		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
   298  		opts.UseGraphCache,
   299  	)
   300  	if err != nil {
   301  		return nil, err
   302  	}
   303  
   304  	// Synchronize the database version and apply migrations if needed.
   305  	if err := chanDB.syncVersions(dbVersions); err != nil {
   306  		backend.Close()
   307  		return nil, err
   308  	}
   309  
   310  	// Run dcrlnd-specific init code (that hasn't been ported to lnd).
   311  	if err := chanDB.initDcrlndFeatures(); err != nil {
   312  		backend.Close()
   313  		return nil, err
   314  	}
   315  
   316  	return chanDB, nil
   317  }
   318  
   319  // Path returns the file path to the channel database.
   320  func (d *DB) Path() string {
   321  	return d.dbPath
   322  }
   323  
   324  var dbTopLevelBuckets = [][]byte{
   325  	openChannelBucket,
   326  	closedChannelBucket,
   327  	chanReestablishWaitTimeBucket, // DCR only
   328  	forwardingLogBucket,
   329  	fwdPackagesKey,
   330  	invoiceBucket,
   331  	payAddrIndexBucket,
   332  	setIDIndexBucket,
   333  	paymentsIndexBucket,
   334  	peersBucket,
   335  	nodeInfoBucket,
   336  	metaBucket,
   337  	closeSummaryBucket,
   338  	outpointBucket,
   339  	historicalChannelBucket,
   340  }
   341  
   342  // Wipe completely deletes all saved state within all used buckets within the
   343  // database. The deletion is done in a single transaction, therefore this
   344  // operation is fully atomic.
   345  func (d *DB) Wipe() error {
   346  	err := kvdb.Update(d, func(tx kvdb.RwTx) error {
   347  		for _, tlb := range dbTopLevelBuckets {
   348  			err := tx.DeleteTopLevelBucket(tlb)
   349  			if err != nil && err != kvdb.ErrBucketNotFound {
   350  				return err
   351  			}
   352  		}
   353  		return nil
   354  	}, func() {})
   355  	if err != nil {
   356  		return err
   357  	}
   358  
   359  	return initChannelDB(d.Backend)
   360  }
   361  
   362  // initChannelDB creates and initializes a fresh version of channeldb. In the
   363  // case that the target path has not yet been created or doesn't yet exist, then
   364  // the path is created. Additionally, all required top-level buckets used within
   365  // the database are created.
   366  func initChannelDB(db kvdb.Backend) error {
   367  	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
   368  		meta := &Meta{}
   369  		// Check if DB is already initialized.
   370  		err := fetchMeta(meta, tx)
   371  		if err == nil {
   372  			return nil
   373  		}
   374  
   375  		for _, tlb := range dbTopLevelBuckets {
   376  			if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
   377  				return err
   378  			}
   379  		}
   380  
   381  		meta.DbVersionNumber = getLatestDBVersion(dbVersions)
   382  		return putMeta(meta, tx)
   383  	}, func() {})
   384  	if err != nil {
   385  		return fmt.Errorf("unable to create new channeldb: %v", err)
   386  	}
   387  
   388  	return nil
   389  }
   390  
   391  // fileExists returns true if the file exists, and false otherwise.
   392  func fileExists(path string) bool {
   393  	if _, err := os.Stat(path); err != nil {
   394  		if os.IsNotExist(err) {
   395  			return false
   396  		}
   397  	}
   398  
   399  	return true
   400  }
   401  
   402  // ChannelStateDB is a database that keeps track of all channel state.
   403  type ChannelStateDB struct {
   404  	// linkNodeDB separates all DB operations on LinkNodes.
   405  	linkNodeDB *LinkNodeDB
   406  
   407  	// parent holds a pointer to the "main" channeldb.DB object. This is
   408  	// only used for testing and should never be used in production code.
   409  	// For testing use the ChannelStateDB.GetParentDB() function to retrieve
   410  	// this pointer.
   411  	parent *DB
   412  
   413  	// backend points to the actual backend holding the channel state
   414  	// database. This may be a real backend or a cache middleware.
   415  	backend kvdb.Backend
   416  }
   417  
   418  // GetParentDB returns the "main" channeldb.DB object that is the owner of this
   419  // ChannelStateDB instance. Use this function only in tests where passing around
   420  // pointers makes testing less readable. Never to be used in production code!
   421  func (c *ChannelStateDB) GetParentDB() *DB {
   422  	return c.parent
   423  }
   424  
   425  // LinkNodeDB returns the current instance of the link node database.
   426  func (c *ChannelStateDB) LinkNodeDB() *LinkNodeDB {
   427  	return c.linkNodeDB
   428  }
   429  
   430  // FetchOpenChannels starts a new database transaction and returns all stored
   431  // currently active/open channels associated with the target nodeID. In the case
   432  // that no active channels are known to have been created with this node, then a
   433  // zero-length slice is returned.
   434  func (c *ChannelStateDB) FetchOpenChannels(nodeID *secp256k1.PublicKey) (
   435  	[]*OpenChannel, error) {
   436  
   437  	var channels []*OpenChannel
   438  	err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
   439  		var err error
   440  		channels, err = c.fetchOpenChannels(tx, nodeID)
   441  		return err
   442  	}, func() {
   443  		channels = nil
   444  	})
   445  
   446  	return channels, err
   447  }
   448  
   449  // fetchOpenChannels uses an existing database transaction and returns all
   450  // stored currently active/open channels associated with the target nodeID. In
   451  // the case that no active channels are known to have been created with this
   452  // node, then a zero-length slice is returned.
   453  func (c *ChannelStateDB) fetchOpenChannels(tx kvdb.RTx,
   454  	nodeID *secp256k1.PublicKey) ([]*OpenChannel, error) {
   455  
   456  	// Get the bucket dedicated to storing the metadata for open channels.
   457  	openChanBucket := tx.ReadBucket(openChannelBucket)
   458  	if openChanBucket == nil {
   459  		return nil, nil
   460  	}
   461  
   462  	// Within this top level bucket, fetch the bucket dedicated to storing
   463  	// open channel data specific to the remote node.
   464  	pub := nodeID.SerializeCompressed()
   465  	nodeChanBucket := openChanBucket.NestedReadBucket(pub)
   466  	if nodeChanBucket == nil {
   467  		return nil, nil
   468  	}
   469  
   470  	// Next, we'll need to go down an additional layer in order to retrieve
   471  	// the channels for each chain the node knows of.
   472  	var channels []*OpenChannel
   473  	err := nodeChanBucket.ForEach(func(chainHash, v []byte) error {
   474  		// If there's a value, it's not a bucket so ignore it.
   475  		if v != nil {
   476  			return nil
   477  		}
   478  
   479  		// If we've found a valid chainhash bucket, then we'll retrieve
   480  		// that so we can extract all the channels.
   481  		chainBucket := nodeChanBucket.NestedReadBucket(chainHash)
   482  		if chainBucket == nil {
   483  			return fmt.Errorf("unable to read bucket for chain=%x",
   484  				chainHash)
   485  		}
   486  
   487  		// Finally, with both of the necessary buckets retrieved, fetch
   488  		// all the active channels related to this node.
   489  		nodeChannels, err := c.fetchNodeChannels(chainBucket)
   490  		if err != nil {
   491  			return fmt.Errorf("unable to read channel for "+
   492  				"chain_hash=%x, node_key=%x: %v",
   493  				chainHash, pub, err)
   494  		}
   495  
   496  		channels = append(channels, nodeChannels...)
   497  		return nil
   498  	})
   499  
   500  	return channels, err
   501  }
   502  
   503  // fetchNodeChannels retrieves all active channels from the target chainBucket
   504  // which is under a node's dedicated channel bucket. This function is typically
   505  // used to fetch all the active channels related to a particular node.
   506  func (c *ChannelStateDB) fetchNodeChannels(chainBucket kvdb.RBucket) (
   507  	[]*OpenChannel, error) {
   508  
   509  	var channels []*OpenChannel
   510  
   511  	// A node may have channels on several chains, so for each known chain,
   512  	// we'll extract all the channels.
   513  	err := chainBucket.ForEach(func(chanPoint, v []byte) error {
   514  		// If there's a value, it's not a bucket so ignore it.
   515  		if v != nil {
   516  			return nil
   517  		}
   518  
   519  		// Once we've found a valid channel bucket, we'll extract it
   520  		// from the node's chain bucket.
   521  		chanBucket := chainBucket.NestedReadBucket(chanPoint)
   522  
   523  		var outPoint wire.OutPoint
   524  		err := readOutpoint(bytes.NewReader(chanPoint), &outPoint)
   525  		if err != nil {
   526  			return err
   527  		}
   528  		oChannel, err := fetchOpenChannel(chanBucket, &outPoint)
   529  		if err != nil {
   530  			return fmt.Errorf("unable to read channel data for "+
   531  				"chan_point=%v: %v", outPoint, err)
   532  		}
   533  		oChannel.Db = c
   534  
   535  		channels = append(channels, oChannel)
   536  
   537  		return nil
   538  	})
   539  	if err != nil {
   540  		return nil, err
   541  	}
   542  
   543  	return channels, nil
   544  }
   545  
   546  // FetchChannel attempts to locate a channel specified by the passed channel
   547  // point. If the channel cannot be found, then an error will be returned.
   548  // Optionally an existing db tx can be supplied; if tx is nil, the function
   549  // starts its own read transaction.
   550  func (c *ChannelStateDB) FetchChannel(tx kvdb.RTx, chanPoint wire.OutPoint) (
   551  	*OpenChannel, error) {
   552  
   553  	var (
   554  		targetChan      *OpenChannel
   555  		targetChanPoint bytes.Buffer
   556  	)
   557  
   558  	if err := writeOutpoint(&targetChanPoint, &chanPoint); err != nil {
   559  		return nil, err
   560  	}
   561  
   562  	// chanScan will traverse the following bucket structure:
   563  	//  * nodePub => chainHash => chanPoint
   564  	//
   565  	// At each level we go one further, ensuring that we're traversing the
   566  	// proper key (that's actually a bucket). By only reading the bucket
   567  	// structure and skipping fully decoding each channel, we save a good
   568  	// bit of CPU as we don't need to do things like decompress public
   569  	// keys.
   570  	chanScan := func(tx kvdb.RTx) error {
   571  		// Get the bucket dedicated to storing the metadata for open
   572  		// channels.
   573  		openChanBucket := tx.ReadBucket(openChannelBucket)
   574  		if openChanBucket == nil {
   575  			return ErrNoActiveChannels
   576  		}
   577  
   578  		// Within the open channel bucket are the node pubkeys we have
   579  		// channels with. We don't know the entire set, so we'll check
   580  		// them all.
   581  		return openChanBucket.ForEach(func(nodePub, v []byte) error {
   582  			// Ensure that this is a key the same size as a pubkey,
   583  			// and also that it leads directly to a bucket.
   584  			if len(nodePub) != 33 || v != nil {
   585  				return nil
   586  			}
   587  
   588  			nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
   589  			if nodeChanBucket == nil {
   590  				return nil
   591  			}
   592  
   593  			// The next layer down is all the chains that this node
   594  			// has channels on with us.
   595  			return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
   596  				// If there's a value, it's not a bucket so
   597  				// ignore it.
   598  				if v != nil {
   599  					return nil
   600  				}
   601  
   602  				chainBucket := nodeChanBucket.NestedReadBucket(
   603  					chainHash,
   604  				)
   605  				if chainBucket == nil {
   606  					return fmt.Errorf("unable to read "+
   607  						"bucket for chain=%x", chainHash[:])
   608  				}
   609  
   610  				// Finally we reach the leaf bucket that stores
   611  				// all the chanPoints for this node.
   612  				chanBucket := chainBucket.NestedReadBucket(
   613  					targetChanPoint.Bytes(),
   614  				)
   615  				if chanBucket == nil {
   616  					return nil
   617  				}
   618  
   619  				channel, err := fetchOpenChannel(
   620  					chanBucket, &chanPoint,
   621  				)
   622  				if err != nil {
   623  					return err
   624  				}
   625  
   626  				targetChan = channel
   627  				targetChan.Db = c
   628  
   629  				return nil
   630  			})
   631  		})
   632  	}
   633  
   634  	var err error
   635  	if tx == nil {
   636  		err = kvdb.View(c.backend, chanScan, func() {})
   637  	} else {
   638  		err = chanScan(tx)
   639  	}
   640  	if err != nil {
   641  		return nil, err
   642  	}
   643  
   644  	if targetChan != nil {
   645  		return targetChan, nil
   646  	}
   647  
   648  	// If we can't find the channel, then we return with an error, as we
   649  	// have nothing to back up.
   650  	return nil, ErrChannelNotFound
   651  }
   652  
   653  // FetchAllChannels attempts to retrieve all open channels currently stored
   654  // within the database, including pending open, fully open and channels waiting
   655  // for a closing transaction to confirm.
   656  func (c *ChannelStateDB) FetchAllChannels() ([]*OpenChannel, error) {
   657  	return fetchChannels(c)
   658  }
   659  
   660  // FetchAllOpenChannels will return all channels that have the funding
   661  // transaction confirmed, and are not waiting for a closing transaction to be
   662  // confirmed.
   663  func (c *ChannelStateDB) FetchAllOpenChannels() ([]*OpenChannel, error) {
   664  	return fetchChannels(
   665  		c,
   666  		pendingChannelFilter(false),
   667  		waitingCloseFilter(false),
   668  	)
   669  }
   670  
   671  // FetchPendingChannels will return channels that have completed the process of
   672  // generating and broadcasting funding transactions, but whose funding
   673  // transactions have yet to be confirmed on the blockchain.
   674  func (c *ChannelStateDB) FetchPendingChannels() ([]*OpenChannel, error) {
   675  	return fetchChannels(c,
   676  		pendingChannelFilter(true),
   677  		waitingCloseFilter(false),
   678  	)
   679  }
   680  
   681  // FetchWaitingCloseChannels will return all channels that have been opened,
   682  // but are now waiting for a closing transaction to be confirmed.
   683  //
   684  // NOTE: This includes channels that are also pending to be opened.
   685  func (c *ChannelStateDB) FetchWaitingCloseChannels() ([]*OpenChannel, error) {
   686  	return fetchChannels(
   687  		c, waitingCloseFilter(true),
   688  	)
   689  }
   690  
   691  // fetchChannelsFilter applies a filter to channels retrieved in fetchChannels.
   692  // A set of filters can be combined to filter across multiple dimensions.
   693  type fetchChannelsFilter func(channel *OpenChannel) bool
   694  
   695  // pendingChannelFilter returns a filter based on whether channels are pending
   696  // (ie, their funding transaction still needs to confirm). If pending is false,
   697  // channels with confirmed funding transactions are returned.
   698  func pendingChannelFilter(pending bool) fetchChannelsFilter {
   699  	return func(channel *OpenChannel) bool {
   700  		return channel.IsPending == pending
   701  	}
   702  }
   703  
   704  // waitingCloseFilter returns a filter which filters channels based on whether
   705  // they are awaiting the confirmation of their closing transaction. If waiting
   706  // close is true, channels that have had their closing tx broadcast are
   707  // included. If it is false, channels that are not awaiting confirmation of
   708  // their close transaction are returned.
   709  func waitingCloseFilter(waitingClose bool) fetchChannelsFilter {
   710  	return func(channel *OpenChannel) bool {
   711  		// If the channel is in any other state than Default,
   712  		// then it means it is waiting to be closed.
   713  		channelWaitingClose :=
   714  			channel.ChanStatus() != ChanStatusDefault
   715  
   716  		// Include the channel if it matches the value for
   717  		// waiting close that we are filtering on.
   718  		return channelWaitingClose == waitingClose
   719  	}
   720  }
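        // Illustrative sketch (hypothetical helper, not part of this file):
        // the filters above compose when passed to fetchChannels, e.g. to
        // select channels whose funding transaction has confirmed but whose
        // closing transaction is still unconfirmed.
        func exampleConfirmedWaitingClose(c *ChannelStateDB) ([]*OpenChannel, error) {
        	return fetchChannels(
        		c,
        		pendingChannelFilter(false),
        		waitingCloseFilter(true),
        	)
        }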
   721  
   722  // fetchChannels attempts to retrieve channels currently stored in the
   723  // database. It takes a set of filters which are applied to each channel to
   724  // obtain a set of channels with the desired set of properties. Only channels
   725  // which have a true value returned for *all* of the filters will be returned.
   726  // If no filters are provided, every channel in the open channels bucket will
   727  // be returned.
   728  func fetchChannels(c *ChannelStateDB, filters ...fetchChannelsFilter) (
   729  	[]*OpenChannel, error) {
   730  
   731  	var channels []*OpenChannel
   732  
   733  	err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
   734  		// Get the bucket dedicated to storing the metadata for open
   735  		// channels.
   736  		openChanBucket := tx.ReadBucket(openChannelBucket)
   737  		if openChanBucket == nil {
   738  			return ErrNoActiveChannels
   739  		}
   740  
   741  		// Next, fetch the bucket dedicated to storing metadata related
   742  		// to all nodes. All keys within this bucket are the serialized
   743  		// public keys of all our direct counterparties.
   744  		nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
   745  		if nodeMetaBucket == nil {
   746  			return fmt.Errorf("node bucket not created")
   747  		}
   748  
   749  		// Finally for each node public key in the bucket, fetch all
   750  		// the channels related to this particular node.
   751  		return nodeMetaBucket.ForEach(func(k, v []byte) error {
   752  			nodeChanBucket := openChanBucket.NestedReadBucket(k)
   753  			if nodeChanBucket == nil {
   754  				return nil
   755  			}
   756  
   757  			return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
   758  				// If there's a value, it's not a bucket so
   759  				// ignore it.
   760  				if v != nil {
   761  					return nil
   762  				}
   763  
   764  				// If we've found a valid chainhash bucket,
   765  				// then we'll retrieve that so we can extract
   766  				// all the channels.
   767  				chainBucket := nodeChanBucket.NestedReadBucket(
   768  					chainHash,
   769  				)
   770  				if chainBucket == nil {
   771  					return fmt.Errorf("unable to read "+
   772  						"bucket for chain=%x", chainHash)
   773  				}
   774  
   775  				nodeChans, err := c.fetchNodeChannels(chainBucket)
   776  				if err != nil {
   777  					return fmt.Errorf("unable to read "+
   778  						"channel for chain_hash=%x, "+
   779  						"node_key=%x: %v", chainHash, k, err)
   780  				}
   781  				for _, channel := range nodeChans {
   782  					// includeChannel indicates whether the channel
   783  					// meets the criteria specified by our filters.
   784  					includeChannel := true
   785  
   786  					// Run through each filter and check whether the
   787  					// channel should be included.
   788  					for _, f := range filters {
   789  						// If the channel fails the filter, set
   790  						// includeChannel to false and don't bother
   791  						// checking the remaining filters.
   792  						if !f(channel) {
   793  							includeChannel = false
   794  							break
   795  						}
   796  					}
   797  
   798  					// If the channel passed every filter, include it in
   799  					// our set of channels.
   800  					if includeChannel {
   801  						channels = append(channels, channel)
   802  					}
   803  				}
   804  				return nil
   805  			})
   806  
   807  		})
   808  	}, func() {
   809  		channels = nil
   810  	})
   811  	if err != nil {
   812  		return nil, err
   813  	}
   814  
   815  	return channels, nil
   816  }
   817  
   818  // FetchClosedChannels attempts to fetch all closed channels from the database.
   819  // The pendingOnly bool toggles if channels that aren't yet fully closed should
   820  // be returned in the response or not. When a channel was cooperatively closed,
   821  // it becomes fully closed after a single confirmation.  When a channel was
   822  // forcibly closed, it will become fully closed after _all_ the pending funds
   823  // (if any) have been swept.
   824  func (c *ChannelStateDB) FetchClosedChannels(pendingOnly bool) (
   825  	[]*ChannelCloseSummary, error) {
   826  
   827  	var chanSummaries []*ChannelCloseSummary
   828  
   829  	if err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
   830  		closeBucket := tx.ReadBucket(closedChannelBucket)
   831  		if closeBucket == nil {
   832  			return ErrNoClosedChannels
   833  		}
   834  
   835  		return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) error {
   836  			summaryReader := bytes.NewReader(summaryBytes)
   837  			chanSummary, err := deserializeCloseChannelSummary(summaryReader)
   838  			if err != nil {
   839  				return err
   840  			}
   841  
   842  			// If the query specified to only include pending
   843  			// channels, then we'll skip any channels which aren't
   844  			// currently pending.
   845  			if !chanSummary.IsPending && pendingOnly {
   846  				return nil
   847  			}
   848  
   849  			chanSummaries = append(chanSummaries, chanSummary)
   850  			return nil
   851  		})
   852  	}, func() {
   853  		chanSummaries = nil
   854  	}); err != nil {
   855  		return nil, err
   856  	}
   857  
   858  	return chanSummaries, nil
   859  }
   860  
   861  // ErrClosedChannelNotFound signals that a closed channel could not be found in
   862  // the channeldb.
   863  var ErrClosedChannelNotFound = errors.New("unable to find closed channel summary")
   864  
   865  // FetchClosedChannel queries for a channel close summary using the channel
   866  // point of the channel in question.
   867  func (c *ChannelStateDB) FetchClosedChannel(chanID *wire.OutPoint) (
   868  	*ChannelCloseSummary, error) {
   869  
   870  	var chanSummary *ChannelCloseSummary
   871  	if err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
   872  		closeBucket := tx.ReadBucket(closedChannelBucket)
   873  		if closeBucket == nil {
   874  			return ErrClosedChannelNotFound
   875  		}
   876  
   877  		var b bytes.Buffer
   878  		var err error
   879  		if err = writeOutpoint(&b, chanID); err != nil {
   880  			return err
   881  		}
   882  
   883  		summaryBytes := closeBucket.Get(b.Bytes())
   884  		if summaryBytes == nil {
   885  			return ErrClosedChannelNotFound
   886  		}
   887  
   888  		summaryReader := bytes.NewReader(summaryBytes)
   889  		chanSummary, err = deserializeCloseChannelSummary(summaryReader)
   890  
   891  		return err
   892  	}, func() {
   893  		chanSummary = nil
   894  	}); err != nil {
   895  		return nil, err
   896  	}
   897  
   898  	return chanSummary, nil
   899  }
   900  
   901  // FetchClosedChannelForID queries for a channel close summary using the
   902  // channel ID of the channel in question.
   903  func (c *ChannelStateDB) FetchClosedChannelForID(cid lnwire.ChannelID) (
   904  	*ChannelCloseSummary, error) {
   905  
   906  	var chanSummary *ChannelCloseSummary
   907  	if err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
   908  		closeBucket := tx.ReadBucket(closedChannelBucket)
   909  		if closeBucket == nil {
   910  			return ErrClosedChannelNotFound
   911  		}
   912  
   913  		// The first 30 bytes of the channel ID and outpoint will be
   914  		// equal.
   915  		cursor := closeBucket.ReadCursor()
   916  		op, c := cursor.Seek(cid[:30])
   917  
   918  		// We scan over all possible candidates for this channel ID.
   919  		for ; op != nil && bytes.Compare(cid[:30], op[:30]) <= 0; op, c = cursor.Next() {
   920  			var outPoint wire.OutPoint
   921  			err := readOutpoint(bytes.NewReader(op), &outPoint)
   922  			if err != nil {
   923  				return err
   924  			}
   925  
   926  			// If the found outpoint does not correspond to this
   927  			// channel ID, we continue.
   928  			if !cid.IsChanPoint(&outPoint) {
   929  				continue
   930  			}
   931  
   932  			// Deserialize the close summary and return.
   933  			r := bytes.NewReader(c)
   934  			chanSummary, err = deserializeCloseChannelSummary(r)
   935  			if err != nil {
   936  				return err
   937  			}
   938  
   939  			return nil
   940  		}
   941  		return ErrClosedChannelNotFound
   942  	}, func() {
   943  		chanSummary = nil
   944  	}); err != nil {
   945  		return nil, err
   946  	}
   947  
   948  	return chanSummary, nil
   949  }
   950  
   951  // MarkChanFullyClosed marks a channel as fully closed within the database. A
   952  // channel should be marked as fully closed if the channel was initially
   953  // cooperatively closed and it's reached a single confirmation, or after all
   954  // the pending funds in a channel that has been forcibly closed have been
   955  // swept.
   956  func (c *ChannelStateDB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
   957  	var (
   958  		openChannels  []*OpenChannel
   959  		pruneLinkNode *secp256k1.PublicKey
   960  	)
   961  	err := kvdb.Update(c.backend, func(tx kvdb.RwTx) error {
   962  		var b bytes.Buffer
   963  		if err := writeOutpoint(&b, chanPoint); err != nil {
   964  			return err
   965  		}
   966  
   967  		chanID := b.Bytes()
   968  
   969  		closedChanBucket, err := tx.CreateTopLevelBucket(
   970  			closedChannelBucket,
   971  		)
   972  		if err != nil {
   973  			return err
   974  		}
   975  
   976  		chanSummaryBytes := closedChanBucket.Get(chanID)
   977  		if chanSummaryBytes == nil {
   978  			return fmt.Errorf("no closed channel for "+
   979  				"chan_point=%v found", chanPoint)
   980  		}
   981  
   982  		chanSummaryReader := bytes.NewReader(chanSummaryBytes)
   983  		chanSummary, err := deserializeCloseChannelSummary(
   984  			chanSummaryReader,
   985  		)
   986  		if err != nil {
   987  			return err
   988  		}
   989  
   990  		chanSummary.IsPending = false
   991  
   992  		var newSummary bytes.Buffer
   993  		err = serializeChannelCloseSummary(&newSummary, chanSummary)
   994  		if err != nil {
   995  			return err
   996  		}
   997  
   998  		err = closedChanBucket.Put(chanID, newSummary.Bytes())
   999  		if err != nil {
  1000  			return err
  1001  		}
  1002  
  1003  		// Now that the channel is closed, we'll check if we have any
  1004  		// other open channels with this peer. If we don't we'll
  1005  		// garbage collect it to ensure we don't establish persistent
  1006  		// connections to peers without open channels.
  1007  		pruneLinkNode = chanSummary.RemotePub
  1008  		openChannels, err = c.fetchOpenChannels(
  1009  			tx, pruneLinkNode,
  1010  		)
  1011  		if err != nil {
  1012  			return fmt.Errorf("unable to fetch open channels for "+
  1013  				"peer %x: %v",
  1014  				pruneLinkNode.SerializeCompressed(), err)
  1015  		}
  1016  
  1017  		return nil
  1018  	}, func() {
  1019  		openChannels = nil
  1020  		pruneLinkNode = nil
  1021  	})
  1022  	if err != nil {
  1023  		return err
  1024  	}
  1025  
  1026  	// Decide whether we want to remove the link node, based upon the number
  1027  	// of still open channels.
  1028  	return c.pruneLinkNode(openChannels, pruneLinkNode)
  1029  }
  1030  
  1031  // pruneLinkNode determines whether we should garbage collect a link node from
  1032  // the database due to no longer having any open channels with it. If there are
  1033  // any left, then this acts as a no-op.
  1034  func (c *ChannelStateDB) pruneLinkNode(openChannels []*OpenChannel,
  1035  	remotePub *secp256k1.PublicKey) error {
  1036  
  1037  	if len(openChannels) > 0 {
  1038  		return nil
  1039  	}
  1040  
  1041  	log.Infof("Pruning link node %x with zero open channels from database",
  1042  		remotePub.SerializeCompressed())
  1043  
  1044  	return c.linkNodeDB.DeleteLinkNode(remotePub)
  1045  }
  1046  
  1047  // PruneLinkNodes attempts to prune all link nodes found within the database
  1048  // with whom we no longer have any open channels.
  1049  func (c *ChannelStateDB) PruneLinkNodes() error {
  1050  	allLinkNodes, err := c.linkNodeDB.FetchAllLinkNodes()
  1051  	if err != nil {
  1052  		return err
  1053  	}
  1054  
  1055  	for _, linkNode := range allLinkNodes {
  1056  		var (
  1057  			openChannels []*OpenChannel
  1058  			linkNode     = linkNode
  1059  		)
  1060  		err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
  1061  			var err error
  1062  			openChannels, err = c.fetchOpenChannels(
  1063  				tx, linkNode.IdentityPub,
  1064  			)
  1065  			return err
  1066  		}, func() {
  1067  			openChannels = nil
  1068  		})
  1069  		if err != nil {
  1070  			return err
  1071  		}
  1072  
  1073  		err = c.pruneLinkNode(openChannels, linkNode.IdentityPub)
  1074  		if err != nil {
  1075  			return err
  1076  		}
  1077  	}
  1078  
  1079  	return nil
  1080  }
  1081  
  1082  // ChannelShell is a shell of a channel that is meant to be used for channel
  1083  // recovery purposes. It contains a minimal OpenChannel instance along with
  1084  // addresses for that target node.
  1085  type ChannelShell struct {
  1086  	// NodeAddrs is the set of addresses that this node is known to have
  1087  	// been reachable at in the past.
  1088  	NodeAddrs []net.Addr
  1089  
  1090  	// Chan is a shell of an OpenChannel; it contains only the items
  1091  	// required to restore the channel on disk.
  1092  	Chan *OpenChannel
  1093  }
  1094  
  1095  // RestoreChannelShells is a method that allows the caller to reconstruct the
  1096  // state of an OpenChannel from the ChannelShell. We'll attempt to write the
  1097  // new channel to disk, create a LinkNode instance with the passed node
  1098  // addresses, and finally create an edge within the graph for the channel as
  1099  // well. This method is idempotent, so repeated calls with the same set of
  1100  // channel shells won't modify the database after the initial call.
  1101  func (c *ChannelStateDB) RestoreChannelShells(channelShells ...*ChannelShell) error {
  1102  	err := kvdb.Update(c.backend, func(tx kvdb.RwTx) error {
  1103  		for _, channelShell := range channelShells {
  1104  			channel := channelShell.Chan
  1105  
  1106  			// When we make a channel, we mark that the channel has
  1107  			// been restored, this will signal to other sub-systems
  1108  			// to not attempt to use the channel as if it was a
  1109  			// regular one.
  1110  			channel.chanStatus |= ChanStatusRestored
  1111  
  1112  			// First, we'll attempt to create a new open channel
  1113  			// and link node for this channel. If the channel
  1114  			// already exists, then in order to ensure this method
  1115  			// is idempotent, we'll continue to the next step.
  1116  			channel.Db = c
  1117  			err := syncNewChannel(
  1118  				tx, channel, channelShell.NodeAddrs,
  1119  			)
  1120  			if err != nil {
  1121  				return err
  1122  			}
  1123  		}
  1124  
  1125  		return nil
  1126  	}, func() {})
  1127  	if err != nil {
  1128  		return err
  1129  	}
  1130  
  1131  	return nil
  1132  }
  1133  
  1134  // AddrsForNode consults the graph and channel database for all addresses known
  1135  // to the passed node public key.
  1136  func (d *DB) AddrsForNode(nodePub *secp256k1.PublicKey) ([]net.Addr,
  1137  	error) {
  1138  
  1139  	linkNode, err := d.channelStateDB.linkNodeDB.FetchLinkNode(nodePub)
  1140  	if err != nil {
  1141  		return nil, err
  1142  	}
  1143  
  1144  	// We'll also query the graph for this peer to see if they have any
  1145  	// addresses that we don't currently have stored within the link node
  1146  	// database.
  1147  	pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
  1148  	if err != nil {
  1149  		return nil, err
  1150  	}
  1151  	graphNode, err := d.graph.FetchLightningNode(pubKey)
  1152  	if err != nil && err != ErrGraphNodeNotFound {
  1153  		return nil, err
  1154  	} else if err == ErrGraphNodeNotFound {
  1155  		// If the node isn't found, then that's OK, as we still have the
  1156  		// link node data. But any other error needs to be returned.
  1157  		graphNode = &LightningNode{}
  1158  	}
  1159  
  1160  	// Now that we have both sources of addrs for this node, we'll use a
  1161  	// map to de-duplicate any addresses between the two sources, and
  1162  	// produce a final list of the combined addrs.
  1163  	addrs := make(map[string]net.Addr)
  1164  	for _, addr := range linkNode.Addresses {
  1165  		addrs[addr.String()] = addr
  1166  	}
  1167  	for _, addr := range graphNode.Addresses {
  1168  		addrs[addr.String()] = addr
  1169  	}
  1170  	dedupedAddrs := make([]net.Addr, 0, len(addrs))
  1171  	for _, addr := range addrs {
  1172  		dedupedAddrs = append(dedupedAddrs, addr)
  1173  	}
  1174  
  1175  	return dedupedAddrs, nil
  1176  }
  1177  
  1178  // AbandonChannel attempts to remove the target channel from the open channel
  1179  // database. If the channel was already removed (has a closed channel entry),
  1180  // then we'll return a nil error. Otherwise, we'll insert a new close summary
  1181  // into the database.
  1182  func (c *ChannelStateDB) AbandonChannel(chanPoint *wire.OutPoint,
  1183  	bestHeight uint32) error {
  1184  
  1185  	// With the chanPoint constructed, we'll attempt to find the target
  1186  	// channel in the database. If we can't find the channel, then we'll
  1187  	// return the error back to the caller.
  1188  	dbChan, err := c.FetchChannel(nil, *chanPoint)
  1189  	switch {
  1190  	// If the channel wasn't found, then it's possible that it was already
  1191  	// abandoned from the database.
  1192  	case err == ErrChannelNotFound:
  1193  		_, closedErr := c.FetchClosedChannel(chanPoint)
  1194  		if closedErr != nil {
  1195  			return closedErr
  1196  		}
  1197  
  1198  		// If the channel was already closed, then we don't return an
  1199  		// error, as we'd like for this step to be repeatable.
  1200  		return nil
  1201  	case err != nil:
  1202  		return err
  1203  	}
  1204  
  1205  	// Now that we've found the channel, we'll populate a close summary for
  1206  	// the channel, so we can store as much information for this abandoned
  1207  	// channel as possible. We also ensure that we set Pending to false, to
  1208  	// indicate that this channel has been "fully" closed.
  1209  	summary := &ChannelCloseSummary{
  1210  		CloseType:               Abandoned,
  1211  		ChanPoint:               *chanPoint,
  1212  		ChainHash:               dbChan.ChainHash,
  1213  		CloseHeight:             bestHeight,
  1214  		RemotePub:               dbChan.IdentityPub,
  1215  		Capacity:                dbChan.Capacity,
  1216  		SettledBalance:          dbChan.LocalCommitment.LocalBalance.ToAtoms(),
  1217  		ShortChanID:             dbChan.ShortChanID(),
  1218  		RemoteCurrentRevocation: dbChan.RemoteCurrentRevocation,
  1219  		RemoteNextRevocation:    dbChan.RemoteNextRevocation,
  1220  		LocalChanConfig:         dbChan.LocalChanCfg,
  1221  	}
  1222  
  1223  	// Finally, we'll close the channel in the DB, and return back to the
  1224  	// caller. We set ourselves as the close initiator because we abandoned
  1225  	// the channel.
  1226  	return dbChan.CloseChannel(summary, ChanStatusLocalCloseInitiator)
  1227  }
  1228  
  1229  // SaveChannelOpeningState saves the serialized channel state for the provided
  1230  // chanPoint to the channelOpeningStateBucket.
  1231  func (c *ChannelStateDB) SaveChannelOpeningState(outPoint,
  1232  	serializedState []byte) error {
  1233  
  1234  	return kvdb.Update(c.backend, func(tx kvdb.RwTx) error {
  1235  		bucket, err := tx.CreateTopLevelBucket(channelOpeningStateBucket)
  1236  		if err != nil {
  1237  			return err
  1238  		}
  1239  
  1240  		return bucket.Put(outPoint, serializedState)
  1241  	}, func() {})
  1242  }
  1243  
  1244  // GetChannelOpeningState fetches the serialized channel state for the provided
  1245  // outPoint from the database, or returns ErrChannelNotFound if the channel
  1246  // is not found.
  1247  func (c *ChannelStateDB) GetChannelOpeningState(outPoint []byte) ([]byte, error) {
  1248  	var serializedState []byte
  1249  	err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
  1250  		bucket := tx.ReadBucket(channelOpeningStateBucket)
  1251  		if bucket == nil {
  1252  			// If the bucket does not exist, it means we never added
  1253  			// a channel to the db, so return ErrChannelNotFound.
  1254  			return ErrChannelNotFound
  1255  		}
  1256  
  1257  		serializedState = bucket.Get(outPoint)
  1258  		if serializedState == nil {
  1259  			return ErrChannelNotFound
  1260  		}
  1261  
  1262  		return nil
  1263  	}, func() {
  1264  		serializedState = nil
  1265  	})
  1266  	return serializedState, err
  1267  }
  1268  
  1269  // DeleteChannelOpeningState removes any state for outPoint from the database.
  1270  func (c *ChannelStateDB) DeleteChannelOpeningState(outPoint []byte) error {
  1271  	return kvdb.Update(c.backend, func(tx kvdb.RwTx) error {
  1272  		bucket := tx.ReadWriteBucket(channelOpeningStateBucket)
  1273  		if bucket == nil {
  1274  			return ErrChannelNotFound
  1275  		}
  1276  
  1277  		return bucket.Delete(outPoint)
  1278  	}, func() {})
  1279  }
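        // Illustrative round-trip sketch (hypothetical helper, not part of
        // this file): the opening-state methods above are keyed by an opaque
        // byte slice, which callers would typically derive from the channel
        // point via writeOutpoint.
        func exampleChannelOpeningStateRoundTrip(c *ChannelStateDB,
        	chanPoint *wire.OutPoint, state []byte) error {

        	var key bytes.Buffer
        	if err := writeOutpoint(&key, chanPoint); err != nil {
        		return err
        	}

        	// Store the serialized opening state under the channel point.
        	if err := c.SaveChannelOpeningState(key.Bytes(), state); err != nil {
        		return err
        	}

        	// Reading it back returns ErrChannelNotFound if nothing is stored.
        	if _, err := c.GetChannelOpeningState(key.Bytes()); err != nil {
        		return err
        	}

        	// Remove the state once it is no longer needed.
        	return c.DeleteChannelOpeningState(key.Bytes())
        }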
  1280  
  1281  // syncVersions is used for safe db version synchronization. It applies
  1282  // migration functions to the current database and recovers the previous
  1283  // state of the db if an error or panic occurs during migration.
  1284  func (d *DB) syncVersions(versions []version) error {
  1285  	meta, err := d.FetchMeta(nil)
  1286  	if err != nil {
  1287  		if err == ErrMetaNotFound {
  1288  			meta = &Meta{}
  1289  		} else {
  1290  			return err
  1291  		}
  1292  	}
  1293  
  1294  	latestVersion := getLatestDBVersion(versions)
  1295  	log.Infof("Checking for schema update: latest_version=%v, "+
  1296  		"db_version=%v", latestVersion, meta.DbVersionNumber)
  1297  
  1298  	switch {
  1299  
  1300  	// If the database reports a higher version than we are aware of, the
  1301  	// user is probably trying to revert to a prior version of lnd. We fail
  1302  	// here to prevent reversions and unintended corruption.
  1303  	case meta.DbVersionNumber > latestVersion:
  1304  		log.Errorf("Refusing to revert from db_version=%d to "+
  1305  			"lower version=%d", meta.DbVersionNumber,
  1306  			latestVersion)
  1307  		return ErrDBReversion
  1308  
  1309  	// If the current database version matches the latest version number,
  1310  	// then we don't need to perform any migrations.
  1311  	case meta.DbVersionNumber == latestVersion:
  1312  		return nil
  1313  	}
  1314  
  1315  	log.Infof("Performing database schema migration")
  1316  
  1317  	// Otherwise, we fetch the migrations which need to be applied, and
  1318  	// execute them serially within a single database transaction to ensure
  1319  	// the migration is atomic.
  1320  	migrations, migrationVersions := getMigrationsToApply(
  1321  		versions, meta.DbVersionNumber,
  1322  	)
  1323  	return kvdb.Update(d, func(tx kvdb.RwTx) error {
  1324  		for i, migration := range migrations {
  1325  			if migration == nil {
  1326  				continue
  1327  			}
  1328  
  1329  			log.Infof("Applying migration #%v", migrationVersions[i])
  1330  
  1331  			if err := migration(tx); err != nil {
  1332  				log.Infof("Unable to apply migration #%v",
  1333  					migrationVersions[i])
  1334  				return err
  1335  			}
  1336  		}
  1337  
  1338  		meta.DbVersionNumber = latestVersion
  1339  		err := putMeta(meta, tx)
  1340  		if err != nil {
  1341  			return err
  1342  		}
  1343  
  1344  		// In dry-run mode, return an error to prevent the transaction
  1345  		// from committing.
  1346  		if d.dryRun {
  1347  			// In dry run mode, also attempt dcrlnd migrations
  1348  			// before stopping.
  1349  			if err := d.syncDcrlndDBVersions(tx); err != nil {
  1350  				return err
  1351  			}
  1352  
  1353  			return ErrDryRunMigrationOK
  1354  		}
  1355  
  1356  		return nil
  1357  	}, func() {})
  1358  }
  1359  
  1360  // ChannelGraph returns the current instance of the directed channel graph.
  1361  func (d *DB) ChannelGraph() *ChannelGraph {
  1362  	return d.graph
  1363  }
  1364  
  1365  // ChannelStateDB returns the sub database that is concerned with the channel
  1366  // state.
  1367  func (d *DB) ChannelStateDB() *ChannelStateDB {
  1368  	return d.channelStateDB
  1369  }
  1370  
  1371  func getLatestDBVersion(versions []version) uint32 {
  1372  	return versions[len(versions)-1].number
  1373  }
  1374  
  1375  // getMigrationsToApply retrieves the migration functions that should be
  1376  // applied to the database, along with their corresponding version numbers.
  1377  func getMigrationsToApply(versions []version, version uint32) ([]migration, []uint32) {
  1378  	migrations := make([]migration, 0, len(versions))
  1379  	migrationVersions := make([]uint32, 0, len(versions))
  1380  
  1381  	for _, v := range versions {
  1382  		if v.number > version {
  1383  			migrations = append(migrations, v.migration)
  1384  			migrationVersions = append(migrationVersions, v.number)
  1385  		}
  1386  	}
  1387  
  1388  	return migrations, migrationVersions
  1389  }
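        // For illustration with hypothetical values: if the database reports
        // version 22 while dbVersions goes up to 24, only the entries numbered
        // 23 and 24 are returned, in order:
        //
        //	migs, vers := getMigrationsToApply(dbVersions, 22)
        //	// vers == []uint32{23, 24}; migs holds the matching functions.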
  1390  
  1391  // fetchHistoricalChanBucket returns the channel bucket for a given outpoint
  1392  // from the historical channel bucket. If the bucket does not exist,
  1393  // ErrNoHistoricalBucket is returned.
  1394  func fetchHistoricalChanBucket(tx kvdb.RTx,
  1395  	outPoint *wire.OutPoint) (kvdb.RBucket, error) {
  1396  
  1397  	// First fetch the top level bucket which stores all data related to
  1398  	// historically stored channels.
  1399  	historicalChanBucket := tx.ReadBucket(historicalChannelBucket)
  1400  	if historicalChanBucket == nil {
  1401  		return nil, ErrNoHistoricalBucket
  1402  	}
  1403  
  1404  	// With the top-level historical bucket fetched, we can now go down
  1405  	// another level, to the bucket for the channel itself.
  1406  	var chanPointBuf bytes.Buffer
  1407  	if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
  1408  		return nil, err
  1409  	}
  1410  	chanBucket := historicalChanBucket.NestedReadBucket(chanPointBuf.Bytes())
  1411  	if chanBucket == nil {
  1412  		return nil, ErrChannelNotFound
  1413  	}
  1414  
  1415  	return chanBucket, nil
  1416  }
  1417  
  1418  // FetchHistoricalChannel fetches open channel data from the historical channel
  1419  // bucket.
  1420  func (c *ChannelStateDB) FetchHistoricalChannel(outPoint *wire.OutPoint) (
  1421  	*OpenChannel, error) {
  1422  
  1423  	var channel *OpenChannel
  1424  	err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
  1425  		chanBucket, err := fetchHistoricalChanBucket(tx, outPoint)
  1426  		if err != nil {
  1427  			return err
  1428  		}
  1429  
  1430  		channel, err = fetchOpenChannel(chanBucket, outPoint)
  1431  		if err != nil {
  1432  			return err
  1433  		}
  1434  
  1435  		channel.Db = c
  1436  		return nil
  1437  	}, func() {
  1438  		channel = nil
  1439  	})
  1440  	if err != nil {
  1441  		return nil, err
  1442  	}
  1443  
  1444  	return channel, nil
  1445  }
  1446  
  1447  // MakeTestDB creates a new instance of the ChannelDB for testing purposes.
  1448  // A callback which cleans up the created temporary directories is also
  1449  // returned and intended to be executed after the test completes.
  1450  func MakeTestDB(modifiers ...OptionModifier) (*DB, func(), error) {
  1451  	// First, create a temporary directory to be used for the duration of
  1452  	// this test.
  1453  	tempDirName, err := ioutil.TempDir("", "channeldb")
  1454  	if err != nil {
  1455  		return nil, nil, err
  1456  	}
  1457  
  1458  	// Next, create channeldb for the first time.
  1459  	backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb")
  1460  	if err != nil {
  1461  		if backendCleanup != nil {
  1462  			backendCleanup()
  1463  		}
  1464  		return nil, nil, err
  1465  	}
  1466  
  1467  	cdb, err := CreateWithBackend(backend, modifiers...)
  1468  	if err != nil {
  1469  		if backendCleanup != nil {
  1470  			backendCleanup()
  1471  		}
  1472  		os.RemoveAll(tempDirName)
  1473  		return nil, nil, err
  1474  	}
  1475  
  1476  	cleanUp := func() {
  1477  		cdb.Close()
  1478  		backendCleanup()
  1479  		os.RemoveAll(tempDirName)
  1480  	}
  1481  
  1482  	return cdb, cleanUp, nil
  1483  }
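        // Illustrative test usage sketch (hypothetical test, not part of this
        // file):
        //
        //	func TestExample(t *testing.T) {
        //		cdb, cleanUp, err := MakeTestDB()
        //		if err != nil {
        //			t.Fatalf("unable to make test db: %v", err)
        //		}
        //		defer cleanUp()
        //
        //		// Exercise the database, e.g. via the channel state sub-DB.
        //		_ = cdb.ChannelStateDB()
        //	}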