github.com/BlockABC/godash@v0.0.0-20191112120524-f4aa3a32c566/cmd/addblock/import.go

// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2016 The Dash developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/BlockABC/godash/blockchain"
	"github.com/BlockABC/godash/blockchain/indexers"
	"github.com/BlockABC/godash/database"
	"github.com/BlockABC/godash/wire"
	"github.com/BlockABC/godashutil"
)

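// zeroHash is the zero value hash (all zeros).  It is used to check whether
// a block's previous hash is all zeros, which indicates the genesis block.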
var zeroHash = wire.ShaHash{}

// importResults houses the stats and result of an import operation.
type importResults struct {
	blocksProcessed int64
	blocksImported  int64
	err             error
}

// blockImporter houses information about an ongoing import from a block data
// file to the block database.
type blockImporter struct {
	db                database.DB
	chain             *blockchain.BlockChain
	r                 io.ReadSeeker
	processQueue      chan []byte
	doneChan          chan bool
	errChan           chan error
	quit              chan struct{}
	wg                sync.WaitGroup
	blocksProcessed   int64
	blocksImported    int64
	receivedLogBlocks int64
	receivedLogTx     int64
	lastHeight        int64
	lastBlockTime     time.Time
	lastLogTime       time.Time
}

// readBlock reads the next block from the input file.
func (bi *blockImporter) readBlock() ([]byte, error) {
	// The block file format is:
	//  <network> <block length> <serialized block>
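	//
	// where <network> and <block length> are each a little-endian uint32 and
	// <serialized block> is <block length> bytes of wire-encoded block data.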
	var net uint32
	err := binary.Read(bi.r, binary.LittleEndian, &net)
	if err != nil {
		if err != io.EOF {
			return nil, err
		}

		// No block and no error means there are no more blocks to read.
		return nil, nil
	}
	if net != uint32(activeNetParams.Net) {
		return nil, fmt.Errorf("network mismatch -- got %x, want %x",
			net, uint32(activeNetParams.Net))
	}

	// Read the block length and ensure it is sane.
	var blockLen uint32
	if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil {
		return nil, err
	}
	if blockLen > wire.MaxBlockPayload {
		return nil, fmt.Errorf("block payload of %d bytes is larger "+
			"than the max allowed %d bytes", blockLen,
			wire.MaxBlockPayload)
	}

	serializedBlock := make([]byte, blockLen)
	if _, err := io.ReadFull(bi.r, serializedBlock); err != nil {
		return nil, err
	}

	return serializedBlock, nil
}

// processBlock potentially imports the block into the database.  It first
// deserializes the raw block while checking for errors.  Already known blocks
// are skipped and orphan blocks are considered errors.  Finally, it runs the
// block through the chain rules to ensure it follows all rules and matches
// up to the known checkpoint.  Returns whether the block was imported along
// with any potential errors.
func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) {
	// Deserialize the block which includes checks for malformed blocks.
	block, err := godashutil.NewBlockFromBytes(serializedBlock)
	if err != nil {
		return false, err
	}

	// Update progress statistics.
	bi.lastBlockTime = block.MsgBlock().Header.Timestamp
	bi.receivedLogTx += int64(len(block.MsgBlock().Transactions))

	// Skip blocks that already exist.
	blockSha := block.Sha()
	exists, err := bi.chain.HaveBlock(blockSha)
	if err != nil {
		return false, err
	}
	if exists {
		return false, nil
	}

	// Don't bother trying to process orphans.
	prevHash := &block.MsgBlock().Header.PrevBlock
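	// A previous hash of all zeros indicates the genesis block, which has no
	// parent, so the check below is skipped for it.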
	if !prevHash.IsEqual(&zeroHash) {
		exists, err := bi.chain.HaveBlock(prevHash)
		if err != nil {
			return false, err
		}
		if !exists {
			return false, fmt.Errorf("import file contains block "+
				"%v which does not link to the available "+
				"block chain", prevHash)
		}
	}

	// Ensure the block follows all of the chain rules and matches up to the
	// known checkpoints.
	isOrphan, err := bi.chain.ProcessBlock(block, blockchain.BFFastAdd)
	if err != nil {
		return false, err
	}
	if isOrphan {
		return false, fmt.Errorf("import file contains an orphan "+
			"block: %v", blockSha)
	}

	return true, nil
}

// readHandler is the main handler for reading blocks from the import file.
// This allows block processing to take place in parallel with block reads.
// It must be run as a goroutine.
func (bi *blockImporter) readHandler() {
out:
	for {
		// Read the next block from the file and if anything goes wrong
		// notify the status handler with the error and bail.
		serializedBlock, err := bi.readBlock()
		if err != nil {
			bi.errChan <- fmt.Errorf("Error reading from input "+
				"file: %v", err.Error())
			break out
		}

		// A nil block with no error means we're done.
		if serializedBlock == nil {
			break out
		}

		// Send the block or quit if we've been signalled to exit by
		// the status handler due to an error elsewhere.
		select {
		case bi.processQueue <- serializedBlock:
		case <-bi.quit:
			break out
		}
	}

	// Close the processing channel to signal no more blocks are coming.
	close(bi.processQueue)
	bi.wg.Done()
}

// logProgress logs block progress as an information message.  In order to
// prevent spam, it limits logging to one message every cfg.Progress seconds
// with duration and totals included.
func (bi *blockImporter) logProgress() {
	bi.receivedLogBlocks++

	now := time.Now()
	duration := now.Sub(bi.lastLogTime)
	if duration < time.Second*time.Duration(cfg.Progress) {
		return
	}

	// Truncate the duration to 10s of milliseconds.
	durationMillis := int64(duration / time.Millisecond)
	tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

	// Log information about new block height.
	blockStr := "blocks"
	if bi.receivedLogBlocks == 1 {
		blockStr = "block"
	}
	txStr := "transactions"
	if bi.receivedLogTx == 1 {
		txStr = "transaction"
	}
	log.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)",
		bi.receivedLogBlocks, blockStr, tDuration, bi.receivedLogTx,
		txStr, bi.lastHeight, bi.lastBlockTime)

	bi.receivedLogBlocks = 0
	bi.receivedLogTx = 0
	bi.lastLogTime = now
}

// processHandler is the main handler for processing blocks.  This allows block
// processing to take place in parallel with block reads from the import file.
// It must be run as a goroutine.
func (bi *blockImporter) processHandler() {
out:
	for {
		select {
		case serializedBlock, ok := <-bi.processQueue:
			// We're done when the channel is closed.
			if !ok {
				break out
			}

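			// The import file is expected to contain blocks in height
			// order, so the running height used for progress logging is
			// simply incremented for each block.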
			bi.blocksProcessed++
			bi.lastHeight++
			imported, err := bi.processBlock(serializedBlock)
			if err != nil {
				bi.errChan <- err
				break out
			}

			if imported {
				bi.blocksImported++
			}

			bi.logProgress()

		case <-bi.quit:
			break out
		}
	}
	bi.wg.Done()
}

// statusHandler waits for updates from the import operation and notifies
// the passed doneChan with the results of the import.  It also causes all
// goroutines to exit if an error is reported from any of them.
func (bi *blockImporter) statusHandler(resultsChan chan *importResults) {
	select {
	// An error from either of the goroutines means we're done so signal
	// caller with the error and signal all goroutines to quit.
	case err := <-bi.errChan:
		resultsChan <- &importResults{
			blocksProcessed: bi.blocksProcessed,
			blocksImported:  bi.blocksImported,
			err:             err,
		}
		close(bi.quit)

	// The import finished normally.
	case <-bi.doneChan:
		resultsChan <- &importResults{
			blocksProcessed: bi.blocksProcessed,
			blocksImported:  bi.blocksImported,
			err:             nil,
		}
	}
}

// Import is the core function which handles importing the blocks from the file
// associated with the block importer to the database.  It returns a channel
// on which the results will be returned when the operation has completed.
func (bi *blockImporter) Import() chan *importResults {
	// Start up the read and process handling goroutines.  This setup allows
	// blocks to be read from disk in parallel while being processed.
	bi.wg.Add(2)
	go bi.readHandler()
	go bi.processHandler()

	// Wait for the import to finish in a separate goroutine and signal
	// the status handler when done.
	go func() {
		bi.wg.Wait()
		bi.doneChan <- true
	}()

	// Start the status handler and return the result channel that it will
	// send the results on when the import is done.
	resultChan := make(chan *importResults)
	go bi.statusHandler(resultChan)
	return resultChan
}

// newBlockImporter returns a new importer for the provided file reader seeker
// and database.
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
	// Create the transaction and address indexes if needed.
	//
	// CAUTION: the txindex needs to be first in the indexes array because
	// the addrindex uses data from the txindex during catchup.  If the
	// addrindex is run first, it may not have the transactions from the
	// current block indexed.
	var indexes []indexers.Indexer
	if cfg.TxIndex || cfg.AddrIndex {
		// Enable transaction index if address index is enabled since it
		// requires it.
		if !cfg.TxIndex {
			log.Infof("Transaction index enabled because it is " +
				"required by the address index")
			cfg.TxIndex = true
		} else {
			log.Info("Transaction index is enabled")
		}
		indexes = append(indexes, indexers.NewTxIndex(db))
	}
	if cfg.AddrIndex {
		log.Info("Address index is enabled")
		indexes = append(indexes, indexers.NewAddrIndex(db, activeNetParams))
	}

	// Create an index manager if any of the optional indexes are enabled.
	var indexManager blockchain.IndexManager
	if len(indexes) > 0 {
		indexManager = indexers.NewManager(db, indexes)
	}

	chain, err := blockchain.New(&blockchain.Config{
		DB:           db,
		ChainParams:  activeNetParams,
		TimeSource:   blockchain.NewMedianTime(),
		IndexManager: indexManager,
	})
	if err != nil {
		return nil, err
	}

	return &blockImporter{
		db:           db,
		r:            r,
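		// A small buffer allows file reads to stay slightly ahead of block
		// processing.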
		processQueue: make(chan []byte, 2),
		doneChan:     make(chan bool),
		errChan:      make(chan error),
		quit:         make(chan struct{}),
		chain:        chain,
		lastLogTime:  time.Now(),
	}, nil
}