github.com/beyonderyue/gochain@v2.2.26+incompatible/light/txpool.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/gochain-io/gochain/common"
	"github.com/gochain-io/gochain/core"
	"github.com/gochain-io/gochain/core/rawdb"
	"github.com/gochain-io/gochain/core/state"
	"github.com/gochain-io/gochain/core/types"
	"github.com/gochain-io/gochain/log"
	"github.com/gochain-io/gochain/params"
	"github.com/gochain-io/gochain/rlp"
)

const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10
)

// txPermanent is the number of mined blocks after which a mined transaction is
// considered permanent and no rollback is expected.
var txPermanent = uint64(500)

// TxPool implements the transaction pool for light clients, which keeps track
// of the status of locally created transactions, detecting if they are included
// in a block (mined) or rolled back. There are no queued transactions since we
// always receive all locally signed transactions in the same order as they are
// created.
type TxPool struct {
	config *params.ChainConfig
	signer types.Signer
	quit   chan bool

	txFeed core.NewTxsFeed

	chainHeadCh chan core.ChainHeadEvent

	mu       sync.RWMutex
	chain    *LightChain
	odr      OdrBackend
	chainDb  common.Database
	relay    TxRelayBackend
	head     common.Hash
	nonce    map[common.Address]uint64            // "pending" nonce
	pending  map[common.Hash]*types.Transaction   // pending transactions by tx hash
	mined    map[common.Hash][]*types.Transaction // mined transactions by block hash
	clearIdx uint64                               // earliest block nr that can contain mined tx info

	homestead bool
}

// TxRelayBackend provides an interface to the mechanism that forwards transactions
// to the ETH network. The implementations of the functions should be non-blocking.
//
// Send instructs the backend to forward new transactions.
// NewHead notifies the backend about a new head after it has been processed by the
// tx pool, including the mined and rolled back transactions since the last event.
// Discard notifies the backend about transactions that should be discarded, either
// because they have been replaced by a re-send or because they have been mined
// long ago and no rollback is expected.
type TxRelayBackend interface {
	Send(txs types.Transactions)
	NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
	Discard(hashes []common.Hash)
}
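// The sketch below is illustrative and not part of the original file: a minimal
// no-op TxRelayBackend implementation showing the shape such a backend takes.
// The type name nullRelay is hypothetical; a real backend would forward the
// transactions to the network and act on the head/discard notifications.
type nullRelay struct{}

func (nullRelay) Send(txs types.Transactions)                                           {}
func (nullRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {}
func (nullRelay) Discard(hashes []common.Hash)                                          {}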
// NewTxPool creates a new light transaction pool.
func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool {
	pool := &TxPool{
		config:      config,
		signer:      types.NewEIP155Signer(config.ChainId),
		nonce:       make(map[common.Address]uint64),
		pending:     make(map[common.Hash]*types.Transaction),
		mined:       make(map[common.Hash][]*types.Transaction),
		quit:        make(chan bool),
		chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
		chain:       chain,
		relay:       relay,
		odr:         chain.Odr(),
		chainDb:     chain.Odr().Database(),
		head:        chain.CurrentHeader().Hash(),
		clearIdx:    chain.CurrentHeader().Number.Uint64(),
	}
	pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh, "light.TxPool")
	go pool.eventLoop()

	return pool
}

// currentState returns the light state of the current head header.
func (pool *TxPool) currentState(ctx context.Context) *state.StateDB {
	return NewState(ctx, pool.chain.CurrentHeader(), pool.odr)
}

// GetNonce returns the "pending" nonce of a given address. It always queries
// the nonce belonging to the latest header too in order to detect if another
// client using the same key sent a transaction.
func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
	state := pool.currentState(ctx)
	nonce, err := state.GetNonceErr(addr)
	if err != nil {
		return 0, err
	}
	sn, ok := pool.nonce[addr]
	if ok && sn > nonce {
		nonce = sn
	}
	if !ok || sn < nonce {
		pool.nonce[addr] = nonce
	}
	return nonce, nil
}

// txStateChanges stores the recent changes between the pending/mined states of
// transactions. True means mined, false means rolled back, no entry means no change.
type txStateChanges map[common.Hash]bool

// setState sets the status of a tx to either recently mined or recently rolled back.
func (txc txStateChanges) setState(txHash common.Hash, mined bool) {
	val, ent := txc[txHash]
	if ent && (val != mined) {
		delete(txc, txHash)
	} else {
		txc[txHash] = mined
	}
}

// getLists creates lists of mined and rolled back tx hashes.
func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) {
	for hash, val := range txc {
		if val {
			mined = append(mined, hash)
		} else {
			rollback = append(rollback, hash)
		}
	}
	return
}
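// The function below is an illustrative sketch, not part of the original file:
// it demonstrates how setState cancels out opposite updates, so a transaction
// marked as mined and then rolled back within the same map ends up with no
// entry at all. The function name exampleTxStateChanges is hypothetical.
func exampleTxStateChanges(h1, h2 common.Hash) (mined, rollback []common.Hash) {
	txc := make(txStateChanges)
	txc.setState(h1, true)  // h1 was recently mined
	txc.setState(h2, true)  // h2 was recently mined...
	txc.setState(h2, false) // ...and rolled back again: the h2 entry is deleted
	return txc.getLists()   // mined contains only h1, rollback is empty
}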
// checkMinedTxs checks newly added blocks for the currently pending transactions
// and marks them as mined if necessary. It also stores the block position in the db
// and adds them to the received txStateChanges map.
func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number uint64, txc txStateChanges) error {
	// If no transactions are pending, we don't care about anything
	if len(pool.pending) == 0 {
		return nil
	}
	block, err := GetBlock(ctx, pool.odr, hash, number)
	if err != nil {
		return err
	}
	// Gather all the local transactions mined in this block
	list := pool.mined[hash]
	for _, tx := range block.Transactions() {
		if _, ok := pool.pending[tx.Hash()]; ok {
			list = append(list, tx)
		}
	}
	// If some transactions have been mined, write the needed data to disk and update
	if list != nil {
		// Retrieve all the receipts belonging to this block and write the lookup table
		if _, err := GetBlockReceipts(ctx, pool.odr, hash, number); err != nil { // ODR caches, ignore results
			return err
		}
		rawdb.WriteTxLookupEntries(pool.chainDb.GlobalTable(), block)
		// Update the transaction pool's state
		for _, tx := range list {
			delete(pool.pending, tx.Hash())
			txc.setState(tx.Hash(), true)
		}
		pool.mined[hash] = list
	}
	return nil
}

// rollbackTxs marks the transactions contained in recently rolled back blocks
// as rolled back. It also removes any positional lookup entries.
func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
	gbatch := pool.chainDb.GlobalTable().NewBatch()
	if list, ok := pool.mined[hash]; ok {
		for _, tx := range list {
			txHash := tx.Hash()
			rawdb.DeleteTxLookupEntry(gbatch, txHash)
			pool.pending[txHash] = tx
			txc.setState(txHash, false)
		}
		delete(pool.mined, hash)
	}
	rawdb.Must("batch delete tx lookup entries", gbatch.Write)
}
// reorgOnNewHead sets a new head header, processing (and rolling back if necessary)
// the blocks since the last known head, and returns a txStateChanges map containing
// the recently mined and rolled back transaction hashes. If an error (context
// timeout) occurs while checking new blocks, it leaves the locally known head
// at the latest checked block and still returns a valid txStateChanges, making it
// possible to continue checking the missing blocks at the next chain head event.
func (pool *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
	txc := make(txStateChanges)
	oldh := pool.chain.GetHeaderByHash(pool.head)
	newh := newHeader
	// find the common ancestor, create lists of rolled back and new block hashes
	var oldHashes, newHashes []common.Hash
	for oldh.Hash() != newh.Hash() {
		if oldh.Number.Uint64() >= newh.Number.Uint64() {
			oldHashes = append(oldHashes, oldh.Hash())
			oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
		}
		if oldh.Number.Uint64() < newh.Number.Uint64() {
			newHashes = append(newHashes, newh.Hash())
			newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
			if newh == nil {
				// happens when CHT syncing, nothing to do
				newh = oldh
			}
		}
	}
	if oldh.Number.Uint64() < pool.clearIdx {
		pool.clearIdx = oldh.Number.Uint64()
	}
	// roll back old blocks
	for _, hash := range oldHashes {
		pool.rollbackTxs(hash, txc)
	}
	pool.head = oldh.Hash()
	// check mined txs of new blocks (array is in reversed order)
	for i := len(newHashes) - 1; i >= 0; i-- {
		hash := newHashes[i]
		if err := pool.checkMinedTxs(ctx, hash, newHeader.Number.Uint64()-uint64(i), txc); err != nil {
			return txc, err
		}
		pool.head = hash
	}

	// clear old mined tx entries of old blocks
	if idx := newHeader.Number.Uint64(); idx > pool.clearIdx+txPermanent {
		idx2 := idx - txPermanent
		if len(pool.mined) > 0 {
			for i := pool.clearIdx; i < idx2; i++ {
				hash := rawdb.ReadCanonicalHash(pool.chainDb, i)
				if list, ok := pool.mined[hash]; ok {
					hashes := make([]common.Hash, len(list))
					for i, tx := range list {
						hashes[i] = tx.Hash()
					}
					pool.relay.Discard(hashes)
					delete(pool.mined, hash)
				}
			}
		}
		pool.clearIdx = idx2
	}

	return txc, nil
}

// blockCheckTimeout is the time limit for checking new blocks for mined
// transactions. Checking resumes at the next chain head event if timed out.
const blockCheckTimeout = time.Second * 3

// eventLoop processes chain head events and also notifies the tx relay backend
// about the new head hash and tx state changes.
func (pool *TxPool) eventLoop() {
	for ev := range pool.chainHeadCh {
		pool.setNewHead(ev.Block.Header())
		// hack in order to avoid hogging the lock; this part will
		// be replaced by a subsequent PR.
		time.Sleep(time.Millisecond)
	}
}

func (pool *TxPool) setNewHead(head *types.Header) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	ctx, cancel := context.WithTimeout(context.Background(), blockCheckTimeout)
	defer cancel()

	txc, _ := pool.reorgOnNewHead(ctx, head)
	m, r := txc.getLists()
	pool.relay.NewHead(pool.head, m, r)
	pool.homestead = pool.config.IsHomestead(head.Number)
	pool.signer = types.MakeSigner(pool.config, head.Number)
}
// Stop stops the light transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions.
	pool.txFeed.Close()

	// Unsubscribe subscriptions registered from blockchain
	pool.chain.UnsubscribeChainHeadEvent(pool.chainHeadCh)
	close(pool.quit)
	log.Info("Transaction pool stopped")
}

// SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and
// starts sending events to the given channel.
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent, name string) {
	pool.txFeed.Subscribe(ch, name)
}

// UnsubscribeNewTxsEvent removes the given channel's core.NewTxsEvent subscription.
func (pool *TxPool) UnsubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) {
	pool.txFeed.Unsubscribe(ch)
}

// Stats returns the number of currently pending (locally created) transactions.
func (pool *TxPool) Stats() (pending int) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	pending = len(pool.pending)
	return
}

// validateTx checks whether a transaction is valid according to the consensus rules.
func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
	// Validate sender
	var (
		from common.Address
		err  error
	)

	// Validate the transaction sender and its signature. Throw
	// if the from field is invalid.
	if from, err = types.Sender(pool.signer, tx); err != nil {
		return core.ErrInvalidSender
	}
	// Last but not least check for nonce errors
	currentState := pool.currentState(ctx)
	if tx.Nonce() < currentState.GetNonce(from) {
		return core.ErrNonceTooLow
	}

	// Check the transaction doesn't exceed the current
	// block limit gas.
	header := pool.chain.GetHeaderByHash(pool.head)
	if header.GasLimit < tx.Gas() {
		return core.ErrGasLimit
	}

	// Transactions can't be negative. This may never happen
	// using RLP decoded transactions but may occur if you create
	// a transaction using the RPC for example.
	if tx.Value().Sign() < 0 {
		return core.ErrNegativeValue
	}

	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return core.ErrInsufficientFunds
	}

	// Should supply enough intrinsic gas
	gas, err := core.IntrinsicGas(tx.Data(), tx.To() == nil, pool.homestead)
	if err != nil {
		return err
	}
	if tx.Gas() < gas {
		return core.ErrIntrinsicGas
	}
	return nil
}
// add validates a new transaction and sets its state to pending if processable.
// It also updates the locally stored nonce if necessary.
func (pool *TxPool) add(ctx context.Context, tx *types.Transaction) error {
	hash := tx.Hash()

	if pool.pending[hash] != nil {
		return fmt.Errorf("known transaction (%x)", hash[:4])
	}
	err := pool.validateTx(ctx, tx)
	if err != nil {
		return err
	}

	if _, ok := pool.pending[hash]; !ok {
		pool.pending[hash] = tx

		nonce := tx.Nonce() + 1

		addr, _ := types.Sender(pool.signer, tx)
		if nonce > pool.nonce[addr] {
			pool.nonce[addr] = nonce
		}

		pool.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}})
	}

	// Print a log message if a low enough level is set
	log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
	return nil
}

// Add adds a transaction to the pool if valid and passes it to the tx relay
// backend.
func (pool *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	data, err := rlp.EncodeToBytes(tx)
	if err != nil {
		return err
	}

	if err := pool.add(ctx, tx); err != nil {
		return err
	}
	pool.relay.Send(types.Transactions{tx})

	if err := pool.chainDb.GlobalTable().Put(tx.Hash().Bytes(), data); err != nil {
		log.Error("Cannot add to tx pool chain db", "err", err)
	}
	return nil
}

// AddBatch adds all valid transactions to the pool and passes them to the tx
// relay backend.
func (pool *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
	pool.mu.Lock()
	defer pool.mu.Unlock()
	var sendTx types.Transactions

	for _, tx := range txs {
		if err := pool.add(ctx, tx); err == nil {
			sendTx = append(sendTx, tx)
		}
	}
	if len(sendTx) > 0 {
		pool.relay.Send(sendTx)
	}
}

// GetTransaction returns a transaction if it is contained in the pool
// and nil otherwise.
func (pool *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
	// check the txs first
	if tx, ok := pool.pending[hash]; ok {
		return tx
	}
	return nil
}

// GetTransactions returns all currently processable transactions.
// The returned slice may be modified by the caller.
func (pool *TxPool) GetTransactions() types.Transactions {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	txs := make(types.Transactions, len(pool.pending))
	i := 0
	for _, tx := range pool.pending {
		txs[i] = tx
		i++
	}
	return txs
}

// Content retrieves the data content of the transaction pool, returning all the
// pending as well as queued transactions, grouped by account and nonce.
func (pool *TxPool) Content(ctx context.Context) (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	// Retrieve all the pending transactions and sort by account and by nonce
	pending := make(map[common.Address]types.Transactions)
	for _, tx := range pool.pending {
		account, _ := types.Sender(pool.signer, tx)
		pending[account] = append(pending[account], tx)
	}
	// There are no queued transactions in a light pool, just return an empty map
	queued := make(map[common.Address]types.Transactions)
	return pending, queued
}
// RemoveTransactions removes all given transactions from the pool.
func (pool *TxPool) RemoveTransactions(txs types.Transactions) {
	pool.mu.Lock()
	defer pool.mu.Unlock()
	var hashes []common.Hash
	batch := pool.chainDb.GlobalTable().NewBatch()
	for _, tx := range txs {
		hash := tx.Hash()
		delete(pool.pending, hash)
		rawdb.Must("add tx delete to batch", func() error {
			return batch.Delete(hash[:])
		})
		hashes = append(hashes, hash)
	}
	rawdb.Must("batch delete txs", batch.Write)
	pool.relay.Discard(hashes)
}

// RemoveTx removes the transaction with the given hash from the pool.
func (pool *TxPool) RemoveTx(hash common.Hash) {
	pool.mu.Lock()
	defer pool.mu.Unlock()
	// delete from pending pool
	delete(pool.pending, hash)
	if err := pool.chainDb.GlobalTable().Delete(hash[:]); err != nil {
		log.Error("Cannot remove tx from tx pool chain db", "err", err)
	}
	pool.relay.Discard([]common.Hash{hash})
}
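// The function below is an illustrative sketch, not part of the original file:
// it shows how a caller with an already constructed pool and a signed
// transaction might submit it and check that it is still pending. The function
// name exampleSubmit is hypothetical.
func exampleSubmit(ctx context.Context, pool *TxPool, signedTx *types.Transaction) error {
	// Add validates the transaction, marks it pending and hands it to the
	// relay backend, which forwards it to the network.
	if err := pool.Add(ctx, signedTx); err != nil {
		return err
	}
	// The transaction stays in the pending set until it is seen in a block.
	for _, tx := range pool.GetTransactions() {
		if tx.Hash() == signedTx.Hash() {
			log.Debug("Transaction still pending", "hash", tx.Hash())
		}
	}
	return nil
}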