github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/light/txpool.go (about) 1 // This file is part of the go-sberex library. The go-sberex library is 2 // free software: you can redistribute it and/or modify it under the terms 3 // of the GNU Lesser General Public License as published by the Free 4 // Software Foundation, either version 3 of the License, or (at your option) 5 // any later version. 6 // 7 // The go-sberex library is distributed in the hope that it will be useful, 8 // but WITHOUT ANY WARRANTY; without even the implied warranty of 9 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 10 // General Public License <http://www.gnu.org/licenses/> for more details. 11 12 package light 13 14 import ( 15 "context" 16 "fmt" 17 "sync" 18 "time" 19 20 "github.com/Sberex/go-sberex/common" 21 "github.com/Sberex/go-sberex/core" 22 "github.com/Sberex/go-sberex/core/state" 23 "github.com/Sberex/go-sberex/core/types" 24 "github.com/Sberex/go-sberex/ethdb" 25 "github.com/Sberex/go-sberex/event" 26 "github.com/Sberex/go-sberex/log" 27 "github.com/Sberex/go-sberex/params" 28 "github.com/Sberex/go-sberex/rlp" 29 ) 30 31 const ( 32 // chainHeadChanSize is the size of channel listening to ChainHeadEvent. 33 chainHeadChanSize = 10 34 ) 35 36 // txPermanent is the number of mined blocks after a mined transaction is 37 // considered permanent and no rollback is expected 38 var txPermanent = uint64(500) 39 40 // TxPool implements the transaction pool for light clients, which keeps track 41 // of the status of locally created transactions, detecting if they are included 42 // in a block (mined) or rolled back. There are no queued transactions since we 43 // always receive all locally signed transactions in the same order as they are 44 // created. 
type TxPool struct {
	config       *params.ChainConfig
	signer       types.Signer // updated on every new head to match the fork rules (see setNewHead)
	quit         chan bool
	txFeed       event.Feed
	scope        event.SubscriptionScope
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	mu           sync.RWMutex // protects the mutable pool state below
	chain        *LightChain
	odr          OdrBackend
	chainDb      ethdb.Database
	relay        TxRelayBackend
	head         common.Hash                          // hash of the last header processed by the pool
	nonce        map[common.Address]uint64            // "pending" nonce
	pending      map[common.Hash]*types.Transaction   // pending transactions by tx hash
	mined        map[common.Hash][]*types.Transaction // mined transactions by block hash
	clearIdx     uint64                               // earliest block nr that can contain mined tx info

	homestead bool // whether the current head is past the Homestead fork
}

// TxRelayBackend provides an interface to the mechanism that forwards transactions
// to the Sberex network. The implementations of the functions should be non-blocking.
//
// Send instructs backend to forward new transactions.
// NewHead notifies backend about a new head after processed by the tx pool,
// including mined and rolled back transactions since the last event.
// Discard notifies backend about transactions that should be discarded either
// because they have been replaced by a re-send or because they have been mined
// long ago and no rollback is expected.
type TxRelayBackend interface {
	Send(txs types.Transactions)
	NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
	Discard(hashes []common.Hash)
}

// NewTxPool creates a new light transaction pool anchored at the chain's
// current head and starts the event loop goroutine that follows chain head
// events (stopped via the chain head subscription, see Stop).
func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool {
	pool := &TxPool{
		config:      config,
		signer:      types.NewEIP155Signer(config.ChainId),
		nonce:       make(map[common.Address]uint64),
		pending:     make(map[common.Hash]*types.Transaction),
		mined:       make(map[common.Hash][]*types.Transaction),
		quit:        make(chan bool),
		chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
		chain:       chain,
		relay:       relay,
		odr:         chain.Odr(),
		chainDb:     chain.Odr().Database(),
		head:        chain.CurrentHeader().Hash(),
		clearIdx:    chain.CurrentHeader().Number.Uint64(),
	}
	// Subscribe events from blockchain
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	go pool.eventLoop()

	return pool
}

// currentState returns the light state of the current head header. The
// returned StateDB resolves data on demand through the ODR backend, so
// reads may fail with a context error (exposed via its Error method).
func (pool *TxPool) currentState(ctx context.Context) *state.StateDB {
	return NewState(ctx, pool.chain.CurrentHeader(), pool.odr)
}

// GetNonce returns the "pending" nonce of a given address. It always queries
// the nonce belonging to the latest header too in order to detect if another
// client using the same key sent a transaction.
//
// NOTE(review): pool.nonce is read and written here without taking pool.mu,
// while add() mutates the same map under the lock — this looks like it relies
// on callers serializing access; confirm with the call sites.
func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
	state := pool.currentState(ctx)
	nonce := state.GetNonce(addr)
	if state.Error() != nil {
		return 0, state.Error()
	}
	// Use the locally tracked nonce if it is ahead of the on-chain one
	// (i.e. we have pending transactions the state does not know about yet).
	sn, ok := pool.nonce[addr]
	if ok && sn > nonce {
		nonce = sn
	}
	// Refresh the cache if the chain has moved past our tracked value,
	// which means another client using the same key sent transactions.
	if !ok || sn < nonce {
		pool.nonce[addr] = nonce
	}
	return nonce, nil
}

// txStateChanges stores the recent changes between pending/mined states of
// transactions.
// True means mined, false means rolled back, no entry means no change.
type txStateChanges map[common.Hash]bool

// setState sets the status of a tx to either recently mined (mined == true)
// or recently rolled back (mined == false). If the opposite state was already
// recorded for the same hash, the two events cancel out and the entry is
// deleted, leaving "no change".
func (txc txStateChanges) setState(txHash common.Hash, mined bool) {
	val, ent := txc[txHash]
	if ent && (val != mined) {
		delete(txc, txHash)
	} else {
		txc[txHash] = mined
	}
}

// getLists creates lists of mined and rolled back tx hashes from the
// accumulated state changes.
func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) {
	for hash, val := range txc {
		if val {
			mined = append(mined, hash)
		} else {
			rollback = append(rollback, hash)
		}
	}
	return
}

// checkMinedTxs checks newly added blocks for the currently pending transactions
// and marks them as mined if necessary. It also stores block position in the db
// and adds them to the received txStateChanges map.
//
// The block (and its receipts) are fetched through the ODR backend, so this
// may fail with a context timeout; the caller retries on the next head event.
func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number uint64, txc txStateChanges) error {
	// If no transactions are pending, we don't care about anything
	if len(pool.pending) == 0 {
		return nil
	}
	block, err := GetBlock(ctx, pool.odr, hash, number)
	if err != nil {
		return err
	}
	// Gather all the local transactions mined in this block
	list := pool.mined[hash]
	for _, tx := range block.Transactions() {
		if _, ok := pool.pending[tx.Hash()]; ok {
			list = append(list, tx)
		}
	}
	// If some transactions have been mined, write the needed data to disk and update
	if list != nil {
		// Retrieve all the receipts belonging to this block and write the lookup table
		if _, err := GetBlockReceipts(ctx, pool.odr, hash, number); err != nil { // ODR caches, ignore results
			return err
		}
		if err := core.WriteTxLookupEntries(pool.chainDb, block); err != nil {
			return err
		}
		// Update the transaction pool's state: the listed txs are no longer pending
		for _, tx := range list {
			delete(pool.pending, tx.Hash())
			txc.setState(tx.Hash(), true)
		}
		pool.mined[hash] = list
	}
	return nil
}

// rollbackTxs marks the transactions contained in recently rolled back blocks
// as rolled back. It also removes any positional lookup entries.
func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
	if list, ok := pool.mined[hash]; ok {
		for _, tx := range list {
			txHash := tx.Hash()
			core.DeleteTxLookupEntry(pool.chainDb, txHash)
			// A rolled-back transaction becomes pending again.
			pool.pending[txHash] = tx
			txc.setState(txHash, false)
		}
		delete(pool.mined, hash)
	}
}

// reorgOnNewHead sets a new head header, processing (and rolling back if necessary)
// the blocks since the last known head and returns a txStateChanges map containing
// the recently mined and rolled back transaction hashes. If an error (context
// timeout) occurs during checking new blocks, it leaves the locally known head
// at the latest checked block and still returns a valid txStateChanges, making it
// possible to continue checking the missing blocks at the next chain head event.
func (pool *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
	txc := make(txStateChanges)
	oldh := pool.chain.GetHeaderByHash(pool.head)
	newh := newHeader
	// find common ancestor, create list of rolled back and new block hashes
	var oldHashes, newHashes []common.Hash
	for oldh.Hash() != newh.Hash() {
		// Walk the longer chain down first until both sides are at the same height.
		// NOTE(review): GetHeader on the old branch is assumed to never return nil
		// here (the nil guard below only covers the new branch) — confirm the old
		// chain headers are always locally available.
		if oldh.Number.Uint64() >= newh.Number.Uint64() {
			oldHashes = append(oldHashes, oldh.Hash())
			oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
		}
		if oldh.Number.Uint64() < newh.Number.Uint64() {
			newHashes = append(newHashes, newh.Hash())
			newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
			if newh == nil {
				// happens when CHT syncing, nothing to do
				newh = oldh
			}
		}
	}
	// The common ancestor may be below clearIdx after a deep reorg; pull
	// clearIdx back so mined-tx info in the rolled-back range is re-checked.
	if oldh.Number.Uint64() < pool.clearIdx {
		pool.clearIdx = oldh.Number.Uint64()
	}
	// roll back old blocks
	for _, hash := range oldHashes {
		pool.rollbackTxs(hash, txc)
	}
	pool.head = oldh.Hash()
	// check mined txs of new blocks (array is in reversed order)
	for i := len(newHashes) - 1; i >= 0; i-- {
		hash := newHashes[i]
		if err := pool.checkMinedTxs(ctx, hash, newHeader.Number.Uint64()-uint64(i), txc); err != nil {
			// Leave pool.head at the last successfully checked block so the
			// remaining blocks are retried on the next chain head event.
			return txc, err
		}
		pool.head = hash
	}

	// clear old mined tx entries of old blocks: anything older than
	// txPermanent blocks is considered final and handed to relay.Discard.
	if idx := newHeader.Number.Uint64(); idx > pool.clearIdx+txPermanent {
		idx2 := idx - txPermanent
		if len(pool.mined) > 0 {
			for i := pool.clearIdx; i < idx2; i++ {
				hash := core.GetCanonicalHash(pool.chainDb, i)
				if list, ok := pool.mined[hash]; ok {
					hashes := make([]common.Hash, len(list))
					for i, tx := range list {
						hashes[i] = tx.Hash()
					}
					pool.relay.Discard(hashes)
					delete(pool.mined, hash)
				}
			}
		}
		pool.clearIdx = idx2
	}

	return txc, nil
}

// blockCheckTimeout is the time limit for checking new blocks for mined
// transactions. Checking resumes at the next chain head event if timed out.
const blockCheckTimeout = time.Second * 3

// eventLoop processes chain head events and also notifies the tx relay backend
// about the new head hash and tx state changes.
func (pool *TxPool) eventLoop() {
	for {
		select {
		case ev := <-pool.chainHeadCh:
			pool.setNewHead(ev.Block.Header())
			// hack in order to avoid hogging the lock; this part will
			// be replaced by a subsequent PR.
			time.Sleep(time.Millisecond)

		// System stopped
		case <-pool.chainHeadSub.Err():
			return
		}
	}
}

// setNewHead reorganizes the pool onto the given header under the pool lock,
// notifies the relay backend of the head change and the mined/rolled-back
// transactions, and refreshes the fork-dependent signer/homestead flags.
func (pool *TxPool) setNewHead(head *types.Header) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	ctx, cancel := context.WithTimeout(context.Background(), blockCheckTimeout)
	defer cancel()

	// A reorg error (context timeout) is deliberately dropped: reorgOnNewHead
	// leaves the pool head at the last checked block and the next head event
	// resumes from there.
	txc, _ := pool.reorgOnNewHead(ctx, head)
	m, r := txc.getLists()
	pool.relay.NewHead(pool.head, m, r)
	pool.homestead = pool.config.IsHomestead(head.Number)
	pool.signer = types.MakeSigner(pool.config, head.Number)
}

// Stop stops the light transaction pool.
func (pool *TxPool) Stop() {
	// Unsubscribe all subscriptions registered from txpool
	pool.scope.Close()
	// Unsubscribe subscriptions registered from blockchain; this also makes
	// eventLoop return via chainHeadSub.Err().
	pool.chainHeadSub.Unsubscribe()
	close(pool.quit)
	log.Info("Transaction pool stopped")
}

// SubscribeTxPreEvent registers a subscription of core.TxPreEvent and
// starts sending event to the given channel.
func (pool *TxPool) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
	return pool.scope.Track(pool.txFeed.Subscribe(ch))
}

// Stats returns the number of currently pending (locally created) transactions.
func (pool *TxPool) Stats() (pending int) {
	pool.mu.RLock()
	defer pool.mu.RUnlock()

	pending = len(pool.pending)
	return
}

// validateTx checks whether a transaction is valid according to the consensus rules.
func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
	// Validate sender
	var (
		from common.Address
		err  error
	)

	// Validate the transaction sender and its signature. Reject
	// if the from field is invalid.
	if from, err = types.Sender(pool.signer, tx); err != nil {
		return core.ErrInvalidSender
	}
	// Last but not least check for nonce errors
	currentState := pool.currentState(ctx)
	if n := currentState.GetNonce(from); n > tx.Nonce() {
		return core.ErrNonceTooLow
	}

	// Check the transaction doesn't exceed the current
	// block limit gas.
	header := pool.chain.GetHeaderByHash(pool.head)
	if header.GasLimit < tx.Gas() {
		return core.ErrGasLimit
	}

	// Transactions can't be negative. This may never happen
	// using RLP decoded transactions but may occur if you create
	// a transaction using the RPC for example.
	if tx.Value().Sign() < 0 {
		return core.ErrNegativeValue
	}

	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if b := currentState.GetBalance(from); b.Cmp(tx.Cost()) < 0 {
		return core.ErrInsufficientFunds
	}

	// Should supply enough intrinsic gas
	gas, err := core.IntrinsicGas(tx.Data(), tx.To() == nil, pool.homestead)
	if err != nil {
		return err
	}
	if tx.Gas() < gas {
		return core.ErrIntrinsicGas
	}
	// Surface any ODR retrieval error from the state reads above.
	return currentState.Error()
}

// add validates a new transaction and sets its state pending if processable.
// It also updates the locally stored nonce if necessary.
// Callers must hold pool.mu (Add and AddBatch lock it).
func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
	hash := tx.Hash()

	if self.pending[hash] != nil {
		return fmt.Errorf("Known transaction (%x)", hash[:4])
	}
	err := self.validateTx(ctx, tx)
	if err != nil {
		return err
	}

	// NOTE(review): always true — the early return above already guarantees
	// hash is not in pending; the guard is redundant but harmless.
	if _, ok := self.pending[hash]; !ok {
		self.pending[hash] = tx

		nonce := tx.Nonce() + 1

		addr, _ := types.Sender(self.signer, tx)
		if nonce > self.nonce[addr] {
			self.nonce[addr] = nonce
		}

		// Notify the subscribers. This event is posted in a goroutine
		// because it's possible that somewhere during the post "Remove transaction"
		// gets called which will then wait for the global tx pool lock and deadlock.
		go self.txFeed.Send(core.TxPreEvent{Tx: tx})
	}

	// Print a log message if low enough level is set
	log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(self.signer, tx); return from }}, "to", tx.To())
	return nil
}

// Add adds a transaction to the pool if valid and passes it to the tx relay
// backend. The RLP encoding of the transaction is also persisted under its
// hash in the chain database.
func (self *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
	self.mu.Lock()
	defer self.mu.Unlock()

	data, err := rlp.EncodeToBytes(tx)
	if err != nil {
		return err
	}

	if err := self.add(ctx, tx); err != nil {
		return err
	}
	self.relay.Send(types.Transactions{tx})

	// NOTE(review): the Put error is ignored — the tx has already been
	// relayed at this point, so persistence is best-effort; confirm intended.
	self.chainDb.Put(tx.Hash().Bytes(), data)
	return nil
}

// AddBatch adds all valid transactions to the pool and passes them to
// the tx relay backend. Invalid transactions are skipped silently; only
// those that passed validation are forwarded.
func (self *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
	self.mu.Lock()
	defer self.mu.Unlock()
	var sendTx types.Transactions

	for _, tx := range txs {
		if err := self.add(ctx, tx); err == nil {
			sendTx = append(sendTx, tx)
		}
	}
	if len(sendTx) > 0 {
		self.relay.Send(sendTx)
	}
}

// GetTransaction returns a transaction if it is contained in the pool
// and nil otherwise.
//
// NOTE(review): reads pool.pending without taking the pool lock, unlike
// Stats/GetTransactions — looks racy against the event loop; confirm.
func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction {
	// check the txs first
	if tx, ok := tp.pending[hash]; ok {
		return tx
	}
	return nil
}

// GetTransactions returns all currently processable transactions.
// The returned slice may be modified by the caller.
468 func (self *TxPool) GetTransactions() (txs types.Transactions, err error) { 469 self.mu.RLock() 470 defer self.mu.RUnlock() 471 472 txs = make(types.Transactions, len(self.pending)) 473 i := 0 474 for _, tx := range self.pending { 475 txs[i] = tx 476 i++ 477 } 478 return txs, nil 479 } 480 481 // Content retrieves the data content of the transaction pool, returning all the 482 // pending as well as queued transactions, grouped by account and nonce. 483 func (self *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { 484 self.mu.RLock() 485 defer self.mu.RUnlock() 486 487 // Retrieve all the pending transactions and sort by account and by nonce 488 pending := make(map[common.Address]types.Transactions) 489 for _, tx := range self.pending { 490 account, _ := types.Sender(self.signer, tx) 491 pending[account] = append(pending[account], tx) 492 } 493 // There are no queued transactions in a light pool, just return an empty map 494 queued := make(map[common.Address]types.Transactions) 495 return pending, queued 496 } 497 498 // RemoveTransactions removes all given transactions from the pool. 499 func (self *TxPool) RemoveTransactions(txs types.Transactions) { 500 self.mu.Lock() 501 defer self.mu.Unlock() 502 var hashes []common.Hash 503 for _, tx := range txs { 504 //self.RemoveTx(tx.Hash()) 505 hash := tx.Hash() 506 delete(self.pending, hash) 507 self.chainDb.Delete(hash[:]) 508 hashes = append(hashes, hash) 509 } 510 self.relay.Discard(hashes) 511 } 512 513 // RemoveTx removes the transaction with the given hash from the pool. 514 func (pool *TxPool) RemoveTx(hash common.Hash) { 515 pool.mu.Lock() 516 defer pool.mu.Unlock() 517 // delete from pending pool 518 delete(pool.pending, hash) 519 pool.chainDb.Delete(hash[:]) 520 pool.relay.Discard([]common.Hash{hash}) 521 }