github.com/codingfuture/orig-energi3@v0.8.4/miner/worker_test.go

// Copyright 2018 The Energi Core Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of the Energi Core library.
//
// The Energi Core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Energi Core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>.

package miner

import (
	"math/big"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/clique"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"

	energi_testutils "energi.world/core/gen3/energi/common/testutils"
	energi "energi.world/core/gen3/energi/consensus"
	energi_params "energi.world/core/gen3/energi/params"
)

var (
	// Test chain configurations
	testTxPoolConfig  core.TxPoolConfig
	ethashChainConfig *params.ChainConfig
	cliqueChainConfig *params.ChainConfig
	energiChainConfig *params.ChainConfig

	// Test accounts
	testBankKey, _  = crypto.GenerateKey()
	testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
	testBankFunds   = big.NewInt(1000000000000000000)

	testUserKey, _  = crypto.GenerateKey()
	testUserAddress = crypto.PubkeyToAddress(testUserKey.PublicKey)

	// Test transactions
	pendingTxs []*types.Transaction
	newTxs     []*types.Transaction

	// Energi migration signer private key
	migrationSigner, _ = crypto.GenerateKey()
)

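// init wires up the shared test fixtures: a journal-less transaction pool
// configuration, the per-engine chain configurations, and two signed
// transactions that each transfer 1000 wei from the test bank to the test user.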
func init() {
	testTxPoolConfig = core.DefaultTxPoolConfig
	testTxPoolConfig.Journal = ""
	ethashChainConfig = params.TestChainConfig
	// Work on a copy so that installing the Clique section does not mutate the
	// shared params.TestChainConfig used by the ethash configuration above.
	cliqueChainConfig = new(params.ChainConfig)
	*cliqueChainConfig = *params.TestChainConfig
	cliqueChainConfig.Clique = &params.CliqueConfig{
		Period: 10,
		Epoch:  30000,
	}
	energiChainConfig = params.EnergiTestnetChainConfig
	tx1, _ := types.SignTx(types.NewTransaction(0, testUserAddress, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey)
	pendingTxs = append(pendingTxs, tx1)
	tx2, _ := types.SignTx(types.NewTransaction(1, testUserAddress, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey)
	newTxs = append(newTxs, tx2)
}

// testWorkerBackend implements the worker.Backend interface and wraps all
// information needed during testing.
type testWorkerBackend struct {
	db         ethdb.Database
	txPool     *core.TxPool
	chain      *core.BlockChain
	testTxFeed event.Feed
	uncleBlock *types.Block
	migration  *energi_testutils.TestGen2Migration
}

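// newTestWorkerBackend constructs an in-memory blockchain and transaction pool
// for the given consensus engine, pre-mining n blocks plus one extra block on
// the parent of the current head that tests can feed in as an uncle. For the
// Energi engine it also prepares a temporary Gen2 migration snapshot and
// registers the migration signer callbacks.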
func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, n int) *testWorkerBackend {
	var (
		migrations *energi_testutils.TestGen2Migration

		db    = ethdb.NewMemDatabase()
		gspec = core.Genesis{
			Config: chainConfig,
			Alloc:  core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
		}
	)

	switch engine.(type) {
	case *clique.Clique:
		gspec.ExtraData = make([]byte, 32+common.AddressLength+65)
		copy(gspec.ExtraData[32:], testBankAddress[:])
	case *ethash.Ethash:
	case *energi.Energi:
		// Create a Gen2 migration temp file.
		migrations = energi_testutils.NewTestGen2Migration()
		if err := migrations.PrepareTestGen2Migration(chainConfig.ChainID.Uint64()); err != nil {
			t.Fatalf("Creating the Gen2 snapshot failed: %v", err)
		}

		// Fund the migration contract and configure the migration signer.
		gspec.Alloc[energi_params.Energi_MigrationContract] = core.GenesisAccount{Balance: testBankFunds}
		energiEngine := engine.(*energi.Energi)
		// energiEngine.testing = true
		energiEngine.SetMinerCB(
			func() []common.Address {
				return []common.Address{energi_params.Energi_MigrationContract}
			},
			func(addr common.Address, hash []byte) ([]byte, error) {
				return crypto.Sign(hash, migrationSigner)
			},
			func() int { return 1 },
		)
		chainConfig.Energi = &params.EnergiConfig{
			MigrationSigner: crypto.PubkeyToAddress(migrationSigner.PublicKey),
		}

		// Update genesis block config.
		gspec.GasLimit = 8000000
		gspec.Timestamp = 1000
		gspec.Difficulty = big.NewInt(1)
		gspec.Coinbase = energi_params.Energi_Treasury
		gspec.Xfers = core.DeployEnergiGovernance(chainConfig)

	default:
		t.Fatalf("unexpected consensus engine type: %T", engine)
	}
	genesis := gspec.MustCommit(db)

	chain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
	txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain)

	// Generate a small n-block chain and an uncle block for it.
	if n > 0 {
		blocks, _ := core.GenerateChain(chainConfig, genesis, engine, db, n, func(i int, gen *core.BlockGen) {
			gen.SetCoinbase(testBankAddress)
		})
		if _, err := chain.InsertChain(blocks); err != nil {
			t.Fatalf("failed to insert origin chain: %v", err)
		}
	}
	parent := genesis
	if n > 0 {
		parent = chain.GetBlockByHash(chain.CurrentBlock().ParentHash())
	}
	blocks, _ := core.GenerateChain(chainConfig, parent, engine, db, 1, func(i int, gen *core.BlockGen) {
		gen.SetCoinbase(testUserAddress)
	})

	return &testWorkerBackend{
		db:         db,
		chain:      chain,
		txPool:     txpool,
		uncleBlock: blocks[0],
		migration:  migrations,
	}
}

func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain }
func (b *testWorkerBackend) TxPool() *core.TxPool         { return b.txPool }
func (b *testWorkerBackend) PostChainEvents(events []interface{}) {
	b.chain.PostChainEvents(events, nil)
}
func (b *testWorkerBackend) CleanUp() error {
	// The ethash and clique backends never create a migration snapshot, so
	// guard against a nil migration before cleaning up.
	if b.migration == nil {
		return nil
	}
	return b.migration.CleanUp()
}

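// newTestWorker wraps a fresh backend in a worker instance, seeds the pool
// with the pending test transactions, sets the etherbase to the test bank and,
// when the backend prepared one, points the worker at the Gen2 migration file.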
func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, blocks int) (*worker, *testWorkerBackend) {
	backend := newTestWorkerBackend(t, chainConfig, engine, blocks)
	backend.txPool.AddLocals(pendingTxs)
	w := newWorker(chainConfig, engine, backend, new(event.TypeMux), time.Second, params.GenesisGasLimit, params.GenesisGasLimit, nil)
	w.setEtherbase(testBankAddress)
	// Only the Energi backend prepares a Gen2 migration file.
	if backend.migration != nil {
		w.setMigration(backend.migration.TempFileName())
	}
	return w, backend
}

// func TestPendingStateAndBlockEthash(t *testing.T) {
// 	testPendingStateAndBlock(t, ethashChainConfig, ethash.NewFaker())
// }
// func TestPendingStateAndBlockClique(t *testing.T) {
// 	testPendingStateAndBlock(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, ethdb.NewMemDatabase()))
// }

func TestPendingStateAndBlockEnergi(t *testing.T) {
	testPendingStateAndBlock(t, energiChainConfig, energi.New(energiChainConfig.Energi, ethdb.NewMemDatabase()))
}

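// testPendingStateAndBlock verifies that the worker's pending block and state
// pick up both the initial pending transaction and a transaction added later,
// and that the pending block number accounts for the extra migration block
// mined by the Energi engine.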
func testPendingStateAndBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
	defer engine.Close()

	w, b := newTestWorker(t, chainConfig, engine, 0)
	defer func() {
		w.close()
		b.CleanUp()
	}()

	// Trigger processing of the migration tx at block number 1.
	atomic.StoreInt32(&w.running, 1)

	// Ensure the snapshot has been updated.
	time.Sleep(1000 * time.Millisecond)
	block, state := w.pending()

	var wantBlockHeight uint64
	switch engine.(type) {
	case *energi.Energi:
		// The block count increases because the migration block is mined as well.
		wantBlockHeight = 2
	default:
		wantBlockHeight = 1
	}

	if block.NumberU64() != wantBlockHeight {
		t.Errorf("block number mismatch: have %d, want %d", block.NumberU64(), wantBlockHeight)
	}
	if balance := state.GetBalance(testUserAddress); balance.Cmp(big.NewInt(1000)) != 0 {
		t.Errorf("account balance mismatch: have %d, want %d", balance, 1000)
	}
	b.txPool.AddLocals(newTxs)

	// Ensure the new tx events have been processed.
	time.Sleep(1000 * time.Millisecond)
	block, state = w.pending()
	if balance := state.GetBalance(testUserAddress); balance.Cmp(big.NewInt(2000)) != 0 {
		t.Errorf("account balance mismatch: have %d, want %d", balance, 2000)
	}
}

// func TestEmptyWorkEthash(t *testing.T) {
// 	testEmptyWork(t, ethashChainConfig, ethash.NewFaker())
// }
// func TestEmptyWorkClique(t *testing.T) {
// 	testEmptyWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, ethdb.NewMemDatabase()))
// }
func TestEmptyWorkEnergi(t *testing.T) {
	testEmptyWork(t, energiChainConfig, energi.New(energiChainConfig.Energi, ethdb.NewMemDatabase()))
}

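// testEmptyWork checks that the worker first commits an empty block for new
// work and then a second task that includes the pending transaction, verifying
// the receipt count and recipient balance of both tasks.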
func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
	defer engine.Close()

	w, b := newTestWorker(t, chainConfig, engine, 0)
	defer func() {
		w.close()
		b.CleanUp()
	}()

	var (
		taskCh    = make(chan struct{}, 2)
		taskIndex int
	)

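	// The first task generated for the block is the empty one with no
	// receipts; the second includes the pending transaction, which pays the
	// test user 1000 wei.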
	checkEqual := func(t *testing.T, task *task, index int) {
		receiptLen, balance := 0, big.NewInt(0)
		if index == 1 {
			receiptLen, balance = 1, big.NewInt(1000)
		}
		if len(task.receipts) != receiptLen {
			t.Errorf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen)
		}
		if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 {
			t.Errorf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance)
		}
	}

	w.newTaskHook = func(task *task) {
		if task.block.NumberU64() == 2 {
			checkEqual(t, task, taskIndex)
			taskIndex++
			taskCh <- struct{}{}
		}
	}
	w.fullTaskHook = func() {
		time.Sleep(1000 * time.Millisecond)
	}

	// Trigger processing of the migration tx at block number 1.
	atomic.StoreInt32(&w.running, 1)

	// Ensure the worker has finished initialization.
	for {
		b := w.pendingBlock()
		if b != nil && b.NumberU64() >= 1 {
			break
		}
	}

	w.start()
	for i := 0; i < 2; i++ {
		select {
		case <-taskCh:
		case <-time.NewTimer(2 * time.Second).C:
			t.Error("new task timeout")
		}
	}
}

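// TestStreamUncleBlock posts a ChainSideEvent for the side block generated by
// the backend and checks that the uncle hash of the next sealing task reflects
// that uncle.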
func TestStreamUncleBlock(t *testing.T) {
	engine := ethash.NewFaker()
	defer engine.Close()

	w, b := newTestWorker(t, ethashChainConfig, engine, 1)
	defer func() {
		w.close()
		b.CleanUp()
	}()

	var taskCh = make(chan struct{})

	taskIndex := 0
	w.newTaskHook = func(task *task) {
		if task.block.NumberU64() == 2 {
			if taskIndex == 2 {
				have := task.block.Header().UncleHash
				want := types.CalcUncleHash([]*types.Header{b.uncleBlock.Header()})
				if have != want {
					t.Errorf("uncle hash mismatch: have %s, want %s", have.Hex(), want.Hex())
				}
			}
			taskCh <- struct{}{}
			taskIndex++
		}
	}
	w.skipSealHook = func(task *task) bool {
		return true
	}
	w.fullTaskHook = func() {
		time.Sleep(100 * time.Millisecond)
	}

	// Ensure the worker has finished initialization.
	for {
		b := w.pendingBlock()
		if b != nil && b.NumberU64() == 2 {
			break
		}
	}
	w.start()

	// Ignore the first two work cycles.
	for i := 0; i < 2; i++ {
		select {
		case <-taskCh:
		case <-time.NewTimer(time.Second).C:
			t.Error("new task timeout")
		}
	}
	b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.uncleBlock}})

	select {
	case <-taskCh:
	case <-time.NewTimer(time.Second).C:
		t.Error("new task timeout")
	}
}

// func TestRegenerateMiningBlockEthash(t *testing.T) {
// 	testRegenerateMiningBlock(t, ethashChainConfig, ethash.NewFaker())
// }

// func TestRegenerateMiningBlockClique(t *testing.T) {
// 	testRegenerateMiningBlock(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, ethdb.NewMemDatabase()))
// }

func TestRegenerateMiningBlockEnergi(t *testing.T) {
	testRegenerateMiningBlock(t, energiChainConfig, energi.New(energiChainConfig.Energi, ethdb.NewMemDatabase()))
}

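// testRegenerateMiningBlock adds further transactions after the initial work
// has been committed and expects a regenerated task whose receipts and user
// balance cover both the pending and the newly added transaction.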
func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
	defer engine.Close()

	w, b := newTestWorker(t, chainConfig, engine, 0)
	defer func() {
		w.close()
		b.CleanUp()
	}()

	var taskCh = make(chan struct{})

	taskIndex := 0
	w.newTaskHook = func(task *task) {
		if task.block.NumberU64() == 2 {
			if taskIndex == 2 {
				receiptLen, balance := 2, big.NewInt(2000)
				if len(task.receipts) != receiptLen {
					t.Errorf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen)
				}
				if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 {
					t.Errorf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance)
				}
			}
			taskCh <- struct{}{}
			taskIndex++
		}
	}
	// w.skipSealHook = func(task *task) bool {
	// 	return true
	// }
	w.fullTaskHook = func() {
		time.Sleep(100 * time.Millisecond)
	}

	// Trigger processing of the migration tx at block number 1.
	atomic.StoreInt32(&w.running, 1)

	// Ensure the worker has finished initialization.
	for {
		b := w.pendingBlock()
		if b != nil && b.NumberU64() == 1 {
			break
		}
	}

	w.start()
	// Ignore the first two work cycles.
	for i := 0; i < 2; i++ {
		select {
		case <-taskCh:
		case <-time.NewTimer(2 * time.Second).C:
			t.Error("new task timeout")
		}
	}
	b.txPool.AddLocals(newTxs)
	time.Sleep(time.Second)

	select {
	case <-taskCh:
	case <-time.NewTimer(2 * time.Second).C:
		t.Error("new task timeout")
	}
}

// func TestAdjustIntervalEthash(t *testing.T) {
// 	testAdjustInterval(t, ethashChainConfig, ethash.NewFaker())
// }

// func TestAdjustIntervalClique(t *testing.T) {
// 	testAdjustInterval(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, ethdb.NewMemDatabase()))
// }

func TestAdjustIntervalEnergi(t *testing.T) {
	testAdjustInterval(t, energiChainConfig, energi.New(energiChainConfig.Energi, ethdb.NewMemDatabase()))
}

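// testAdjustInterval exercises the resubmit interval handling: an explicit
// setRecommitInterval call, user-driven increase and decrease requests through
// resubmitAdjustCh, and a 500ms request that the final case expects to be
// raised back to one second.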
func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
	defer engine.Close()

	w, b := newTestWorker(t, chainConfig, engine, 0)
	defer func() {
		w.close()
		b.CleanUp()
	}()

	w.skipSealHook = func(task *task) bool {
		return true
	}
	w.fullTaskHook = func() {
		time.Sleep(100 * time.Millisecond)
	}
	var (
		progress = make(chan struct{}, 10)
		result   = make([]float64, 0, 10)
		index    = 0
		start    = false
	)
	w.resubmitHook = func(minInterval time.Duration, recommitInterval time.Duration) {
		// Short circuit if interval checking hasn't started.
		if !start {
			return
		}
		var wantMinInterval, wantRecommitInterval time.Duration

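		// The expected intervals below follow the worker's adjustment formula:
		// next = prev*(1-intervalAdjustRatio) + intervalAdjustRatio*(target±intervalAdjustBias),
		// where target is prev/ratio for an increase and the minimum recommit
		// interval for a decrease.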
		switch index {
		case 0:
			wantMinInterval, wantRecommitInterval = 3*time.Second, 3*time.Second
		case 1:
			origin := float64(3 * time.Second.Nanoseconds())
			estimate := origin*(1-intervalAdjustRatio) + intervalAdjustRatio*(origin/0.8+intervalAdjustBias)
			wantMinInterval, wantRecommitInterval = 3*time.Second, time.Duration(estimate)*time.Nanosecond
		case 2:
			estimate := result[index-1]
			min := float64(3 * time.Second.Nanoseconds())
			estimate = estimate*(1-intervalAdjustRatio) + intervalAdjustRatio*(min-intervalAdjustBias)
			wantMinInterval, wantRecommitInterval = 3*time.Second, time.Duration(estimate)*time.Nanosecond
		case 3:
			wantMinInterval, wantRecommitInterval = time.Second, time.Second
		}

		// Check the intervals.
		if minInterval != wantMinInterval {
			t.Errorf("resubmit min interval mismatch: have %v, want %v", minInterval, wantMinInterval)
		}
		if recommitInterval != wantRecommitInterval {
			t.Errorf("resubmit interval mismatch: have %v, want %v", recommitInterval, wantRecommitInterval)
		}
		result = append(result, float64(recommitInterval.Nanoseconds()))
		index++
		progress <- struct{}{}
	}

	// Trigger processing of the migration tx at block number 1.
	atomic.StoreInt32(&w.running, 1)

	// Ensure the worker has finished initialization.
	for {
		b := w.pendingBlock()
		if b != nil && b.NumberU64() == 1 {
			break
		}
	}

	w.start()

	time.Sleep(time.Second)

	start = true
	w.setRecommitInterval(3 * time.Second)
	select {
	case <-progress:
	case <-time.NewTimer(time.Second).C:
		t.Error("interval reset timeout")
	}

	w.resubmitAdjustCh <- &intervalAdjust{inc: true, ratio: 0.8}
	select {
	case <-progress:
	case <-time.NewTimer(time.Second).C:
		t.Error("interval reset timeout")
	}

	w.resubmitAdjustCh <- &intervalAdjust{inc: false}
	select {
	case <-progress:
	case <-time.NewTimer(time.Second).C:
		t.Error("interval reset timeout")
	}

	w.setRecommitInterval(500 * time.Millisecond)
	select {
	case <-progress:
	case <-time.NewTimer(time.Second).C:
		t.Error("interval reset timeout")
	}
}