// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"encoding/binary"
	"encoding/json"
	"errors"
	"sync"
	"sync/atomic"
	"time"

	"github.com/aswedchain/aswed/common"
	"github.com/aswedchain/aswed/common/mclock"
	"github.com/aswedchain/aswed/core"
	"github.com/aswedchain/aswed/core/rawdb"
	"github.com/aswedchain/aswed/core/state"
	"github.com/aswedchain/aswed/core/types"
	"github.com/aswedchain/aswed/ethdb"
	lps "github.com/aswedchain/aswed/les/lespay/server"
	"github.com/aswedchain/aswed/light"
	"github.com/aswedchain/aswed/log"
	"github.com/aswedchain/aswed/metrics"
	"github.com/aswedchain/aswed/p2p"
	"github.com/aswedchain/aswed/rlp"
	"github.com/aswedchain/aswed/trie"
)

const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
	ethVersion        = 63              // equivalent eth version for the downloader

	// Per-request item caps: a request asking for more than the cap for its
	// message type is rejected by accept() in handleMsg.
	MaxHeaderFetch           = 192 // Amount of block headers to be fetched per retrieval request
	MaxBodyFetch             = 32  // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch          = 128 // Amount of transaction receipts to allow fetching per request
	MaxCodeFetch             = 64  // Amount of contract codes to allow fetching per request
	MaxProofsFetch           = 64  // Amount of merkle proofs to be fetched per retrieval request
	MaxHelperTrieProofsFetch = 64  // Amount of helper tries to be fetched per retrieval request
	MaxTxSend                = 64  // Amount of transactions to be send per request
	MaxTxStatus              = 256 // Amount of transactions to queried per request
)

var (
	errTooManyInvalidRequest = errors.New("too many invalid requests made")
	errFullClientPool        = errors.New("client pool is full")
)

// serverHandler is responsible for serving light client and process
// all incoming light requests.
type serverHandler struct {
	blockchain *core.BlockChain
	chainDb    ethdb.Database
	txpool     *core.TxPool
	server     *LesServer

	closeCh chan struct{}  // Channel used to exit all background routines of handler.
	wg      sync.WaitGroup // WaitGroup used to track all background routines of handler.
	synced  func() bool    // Callback function used to determine whether local node is synced.

	// Testing fields
	addTxsSync bool // When true, transactions are added to the pool synchronously (test hook).
}

// newServerHandler returns a server handler wired to the given chain, database
// and transaction pool. The synced callback reports whether the local node has
// finished syncing; unsynced servers reject incoming light clients.
func newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb ethdb.Database, txpool *core.TxPool, synced func() bool) *serverHandler {
	handler := &serverHandler{
		server:     server,
		blockchain: blockchain,
		chainDb:    chainDb,
		txpool:     txpool,
		closeCh:    make(chan struct{}),
		synced:     synced,
	}
	return handler
}

// start starts the server handler.
// start starts the server handler: it launches the background header
// broadcaster, which is tracked by the handler's wait group.
func (h *serverHandler) start() {
	h.wg.Add(1)
	go h.broadcastHeaders()
}

// stop stops the server handler. It signals all background routines via
// closeCh and blocks until they have exited.
func (h *serverHandler) stop() {
	close(h.closeCh)
	h.wg.Wait()
}

// runPeer is the p2p protocol run function for the given version.
func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
	peer := newClientPeer(int(version), h.server.config.NetworkId, p, newMeteredMsgWriter(rw, int(version)))
	defer peer.close()
	h.wg.Add(1)
	defer h.wg.Done()
	return h.handle(peer)
}

// handle manages the full lifecycle of a single client peer: handshake,
// admission through the client pool, registration, and the message loop.
// It returns only when the peer disconnects or misbehaves.
func (h *serverHandler) handle(p *clientPeer) error {
	p.Log().Debug("Light Ethereum peer connected", "name", p.Name())

	// Execute the LES handshake
	var (
		head   = h.blockchain.CurrentHeader()
		hash   = head.Hash()
		number = head.Number.Uint64()
		td     = h.blockchain.GetTd(hash, number)
	)
	if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), h.server); err != nil {
		p.Log().Debug("Light Ethereum handshake failed", "err", err)
		return err
	}
	if p.server {
		if err := h.server.serverset.register(p); err != nil {
			return err
		}
		// connected to another server, no messages expected, just wait for disconnection
		_, err := p.rw.ReadMsg()
		return err
	}
	// Reject light clients if server is not synced.
	if !h.synced() {
		p.Log().Debug("Light server not synced, rejecting peer")
		return p2p.DiscRequested
	}
	defer p.fcClient.Disconnect()

	// Disconnect the inbound peer if it's rejected by clientPool.
	// NOTE(review): a successful admission is assumed to grant exactly
	// MinRecharge capacity; any other value is treated as rejection — confirm
	// against clientPool.connect's contract.
	if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil {
		p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool)
		return errFullClientPool
	}
	p.balance, _ = h.server.clientPool.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*lps.NodeBalance)
	if p.balance == nil {
		return p2p.DiscRequested
	}
	// Register the peer locally
	if err := h.server.peers.register(p); err != nil {
		h.server.clientPool.disconnect(p)
		p.Log().Error("Light Ethereum peer registration failed", "err", err)
		return err
	}
	clientConnectionGauge.Update(int64(h.server.peers.len()))

	var wg sync.WaitGroup // Wait group used to track all in-flight task routines.

	connectedAt := mclock.Now()
	defer func() {
		// Teardown order matters: wait for all per-request goroutines first,
		// since they may still touch p.balance, which is nilled below.
		wg.Wait() // Ensure all background task routines have exited.
		h.server.peers.unregister(p.id)
		h.server.clientPool.disconnect(p)
		p.balance = nil
		clientConnectionGauge.Update(int64(h.server.peers.len()))
		connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
	}()
	// Mark the peer starts to be served.
	atomic.StoreUint32(&p.serving, 1)
	defer atomic.StoreUint32(&p.serving, 0)

	// Spawn a main loop to handle all incoming messages.
	for {
		// Drain any asynchronous send error queued by a response goroutine
		// before blocking on the next inbound message.
		select {
		case err := <-p.errCh:
			p.Log().Debug("Failed to send light ethereum response", "err", err)
			return err
		default:
		}
		if err := h.handleMsg(p, &wg); err != nil {
			p.Log().Debug("Light Ethereum message handling failed", "err", err)
			return err
		}
	}
}

// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
//
// Each recognized request is admitted through accept() (flow-control budget),
// then served on its own goroutine (tracked via wg) and answered through
// sendResponse(), which settles the real cost against the prepaid maximum.
func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)

	// Discard large message which exceeds the limitation.
	if msg.Size > ProtocolMaxMsgSize {
		clientErrorMeter.Mark(1)
		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	defer msg.Discard()

	var (
		maxCost uint64       // Prepaid flow-control cost, set by accept(), read by sendResponse().
		task    *servingTask // Serving-queue task for this request, set by accept().
	)
	p.responseCount++
	responseCount := p.responseCount
	// accept returns an indicator whether the request can be served.
	// If so, deduct the max cost from the flow control buffer.
	accept := func(reqID, reqCnt, maxCnt uint64) bool {
		// Short circuit if the peer is already frozen or the request is invalid.
		inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0)
		if p.isFrozen() || reqCnt == 0 || reqCnt > maxCnt {
			p.fcClient.OneTimeCost(inSizeCost)
			return false
		}
		// Prepaid max cost units before request been serving.
		maxCost = p.fcCosts.getMaxCost(msg.Code, reqCnt)
		accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)
		if !accepted {
			p.freeze()
			p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
			p.fcClient.OneTimeCost(inSizeCost)
			return false
		}
		// Create a multi-stage task, estimate the time it takes for the task to
		// execute, and cache it in the request service queue.
		factor := h.server.costTracker.globalFactor()
		if factor < 0.001 {
			factor = 1
			p.Log().Error("Invalid global cost factor", "factor", factor)
		}
		maxTime := uint64(float64(maxCost) / factor)
		task = h.server.servingQueue.newTask(p, maxTime, priority)
		if task.start() {
			return true
		}
		// Task could not be started: refund by settling at the message's
		// inbound size cost only.
		p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost)
		return false
	}
	// sendResponse sends back the response and updates the flow control statistic.
	sendResponse := func(reqID, amount uint64, reply *reply, servingTime uint64) {
		p.responseLock.Lock()
		defer p.responseLock.Unlock()

		// Short circuit if the client is already frozen.
		if p.isFrozen() {
			realCost := h.server.costTracker.realCost(servingTime, msg.Size, 0)
			p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
			return
		}
		// Positive correction buffer value with real cost.
		var replySize uint32
		if reply != nil {
			replySize = reply.size()
		}
		var realCost uint64
		if h.server.costTracker.testing {
			realCost = maxCost // Assign a fake cost for testing purpose
		} else {
			realCost = h.server.costTracker.realCost(servingTime, msg.Size, replySize)
			if realCost > maxCost {
				realCost = maxCost
			}
		}
		bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
		if amount != 0 {
			// Feed cost tracker request serving statistic.
			h.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost)
			// Reduce priority "balance" for the specific peer.
			p.balance.RequestServed(realCost)
		}
		if reply != nil {
			p.queueSend(func() {
				if err := reply.send(bv); err != nil {
					// Surface the send error to the main handle() loop;
					// drop it if one is already pending.
					select {
					case p.errCh <- err:
					default:
					}
				}
			})
		}
	}
	// Dispatch on the LES message code. Every handler follows the same shape:
	// decode, accept, serve asynchronously, respond via sendResponse.
	switch msg.Code {
	case GetBlockHeadersMsg:
		p.Log().Trace("Received block header request")
		if metrics.EnabledExpensive {
			miscInHeaderPacketsMeter.Mark(1)
			miscInHeaderTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Query getBlockHeadersData
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		query := req.Query
		if accept(req.ReqID, query.Amount, MaxHeaderFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				hashMode := query.Origin.Hash != (common.Hash{})
				first := true
				maxNonCanonical := uint64(100)

				// Gather headers until the fetch or network limits is reached
				var (
					bytes   common.StorageSize
					headers []*types.Header
					unknown bool
				)
				for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit {
					if !first && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					// Retrieve the next header satisfying the query
					var origin *types.Header
					if hashMode {
						if first {
							origin = h.blockchain.GetHeaderByHash(query.Origin.Hash)
							if origin != nil {
								query.Origin.Number = origin.Number.Uint64()
							}
						} else {
							origin = h.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
						}
					} else {
						origin = h.blockchain.GetHeaderByNumber(query.Origin.Number)
					}
					if origin == nil {
						p.bumpInvalid()
						break
					}
					headers = append(headers, origin)
					bytes += estHeaderRlpSize

					// Advance to the next header of the query
					switch {
					case hashMode && query.Reverse:
						// Hash based traversal towards the genesis block
						ancestor := query.Skip + 1
						if ancestor == 0 {
							unknown = true
						} else {
							query.Origin.Hash, query.Origin.Number = h.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
							unknown = query.Origin.Hash == common.Hash{}
						}
					case hashMode && !query.Reverse:
						// Hash based traversal towards the leaf block
						var (
							current = origin.Number.Uint64()
							next    = current + query.Skip + 1
						)
						if next <= current {
							// Overflowed uint64 addition: treat as an attack attempt.
							infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
							p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
							unknown = true
						} else {
							if header := h.blockchain.GetHeaderByNumber(next); header != nil {
								nextHash := header.Hash()
								expOldHash, _ := h.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
								if expOldHash == query.Origin.Hash {
									query.Origin.Hash, query.Origin.Number = nextHash, next
								} else {
									unknown = true
								}
							} else {
								unknown = true
							}
						}
					case query.Reverse:
						// Number based traversal towards the genesis block
						if query.Origin.Number >= query.Skip+1 {
							query.Origin.Number -= query.Skip + 1
						} else {
							unknown = true
						}

					case !query.Reverse:
						// Number based traversal towards the leaf block
						query.Origin.Number += query.Skip + 1
					}
					first = false
				}
				reply := p.replyBlockHeaders(req.ReqID, headers)
				sendResponse(req.ReqID, query.Amount, reply, task.done())
				if metrics.EnabledExpensive {
					miscOutHeaderPacketsMeter.Mark(1)
					miscOutHeaderTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeHeaderTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}

	case GetBlockBodiesMsg:
		p.Log().Trace("Received block bodies request")
		if metrics.EnabledExpensive {
			miscInBodyPacketsMeter.Mark(1)
			miscInBodyTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID  uint64
			Hashes []common.Hash
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		var (
			bytes  int
			bodies []rlp.RawValue
		)
		reqCnt := len(req.Hashes)
		if accept(req.ReqID, uint64(reqCnt), MaxBodyFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i, hash := range req.Hashes {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					if bytes >= softResponseLimit {
						break
					}
					body := h.blockchain.GetBodyRLP(hash)
					if body == nil {
						p.bumpInvalid()
						continue
					}
					bodies = append(bodies, body)
					bytes += len(body)
				}
				reply := p.replyBlockBodiesRLP(req.ReqID, bodies)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutBodyPacketsMeter.Mark(1)
					miscOutBodyTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeBodyTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}

	case GetCodeMsg:
		p.Log().Trace("Received code request")
		if metrics.EnabledExpensive {
			miscInCodePacketsMeter.Mark(1)
			miscInCodeTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Reqs  []CodeReq
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		var (
			bytes int
			data  [][]byte
		)
		reqCnt := len(req.Reqs)
		if accept(req.ReqID, uint64(reqCnt), MaxCodeFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i, request := range req.Reqs {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					// Look up the root hash belonging to the request
					header := h.blockchain.GetHeaderByHash(request.BHash)
					if header == nil {
						p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash)
						p.bumpInvalid()
						continue
					}
					// Refuse to search stale state data in the database since looking for
					// a non-exist key is kind of expensive.
					local := h.blockchain.CurrentHeader().Number.Uint64()
					if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
						p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local)
						p.bumpInvalid()
						continue
					}
					triedb := h.blockchain.StateCache().TrieDB()

					account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey))
					if err != nil {
						p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
						p.bumpInvalid()
						continue
					}
					code, err := h.blockchain.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash))
					if err != nil {
						p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err)
						// NOTE(review): unlike the lookup failures above, this
						// path does not call p.bumpInvalid — confirm whether
						// that is intentional.
						continue
					}
					// Accumulate the code and abort if enough data was retrieved
					data = append(data, code)
					if bytes += len(code); bytes >= softResponseLimit {
						break
					}
				}
				reply := p.replyCode(req.ReqID, data)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutCodePacketsMeter.Mark(1)
					miscOutCodeTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeCodeTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}

	case GetReceiptsMsg:
		p.Log().Trace("Received receipts request")
		if metrics.EnabledExpensive {
			miscInReceiptPacketsMeter.Mark(1)
			miscInReceiptTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID  uint64
			Hashes []common.Hash
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		var (
			bytes    int
			receipts []rlp.RawValue
		)
		reqCnt := len(req.Hashes)
		if accept(req.ReqID, uint64(reqCnt), MaxReceiptFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i, hash := range req.Hashes {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					if bytes >= softResponseLimit {
						break
					}
					// Retrieve the requested block's receipts, skipping if unknown to us
					results := h.blockchain.GetReceiptsByHash(hash)
					if results == nil {
						// Only blocks with an empty receipt root may legitimately
						// have no stored receipts; anything else is invalid.
						if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
							p.bumpInvalid()
							continue
						}
					}
					// If known, encode and queue for response packet
					if encoded, err := rlp.EncodeToBytes(results); err != nil {
						log.Error("Failed to encode receipt", "err", err)
					} else {
						receipts = append(receipts, encoded)
						bytes += len(encoded)
					}
				}
				reply := p.replyReceiptsRLP(req.ReqID, receipts)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutReceiptPacketsMeter.Mark(1)
					miscOutReceiptTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeReceiptTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}

	case GetProofsV2Msg:
		p.Log().Trace("Received les/2 proofs request")
		if metrics.EnabledExpensive {
			miscInTrieProofPacketsMeter.Mark(1)
			miscInTrieProofTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Reqs  []ProofReq
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Gather state data until the fetch or network limits is reached
		var (
			lastBHash common.Hash
			root      common.Hash
		)
		reqCnt := len(req.Reqs)
		if accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				nodes := light.NewNodeSet()

				for i, request := range req.Reqs {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					// Look up the root hash belonging to the request
					var (
						header *types.Header
						trie   state.Trie
					)
					if request.BHash != lastBHash {
						// New block hash: resolve its header and state root once,
						// caching the result for consecutive requests on the same block.
						root, lastBHash = common.Hash{}, request.BHash

						if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil {
							p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash)
							p.bumpInvalid()
							continue
						}
						// Refuse to search stale state data in the database since looking for
						// a non-exist key is kind of expensive.
						local := h.blockchain.CurrentHeader().Number.Uint64()
						if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
							p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local)
							p.bumpInvalid()
							continue
						}
						root = header.Root
					}
					// If a header lookup failed (non existent), ignore subsequent requests for the same header
					if root == (common.Hash{}) {
						p.bumpInvalid()
						continue
					}
					// Open the account or storage trie for the request
					statedb := h.blockchain.StateCache()

					switch len(request.AccKey) {
					case 0:
						// No account key specified, open an account trie
						trie, err = statedb.OpenTrie(root)
						if trie == nil || err != nil {
							p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err)
							continue
						}
					default:
						// Account key specified, open a storage trie
						account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey))
						if err != nil {
							p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
							p.bumpInvalid()
							continue
						}
						trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)
						if trie == nil || err != nil {
							p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err)
							continue
						}
					}
					// Prove the user's request from the account or stroage trie
					if err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil {
						p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
						continue
					}
					if nodes.DataSize() >= softResponseLimit {
						break
					}
				}
				reply := p.replyProofsV2(req.ReqID, nodes.NodeList())
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutTrieProofPacketsMeter.Mark(1)
					miscOutTrieProofTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeTrieProofTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}

	case GetHelperTrieProofsMsg:
		p.Log().Trace("Received helper trie proof request")
		if metrics.EnabledExpensive {
			miscInHelperTriePacketsMeter.Mark(1)
			miscInHelperTrieTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Reqs  []HelperTrieReq
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Gather state data until the fetch or network limits is reached
		var (
			auxBytes int
			auxData  [][]byte
		)
		reqCnt := len(req.Reqs)
		if accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				var (
					lastIdx  uint64
					lastType uint
					root     common.Hash
					auxTrie  *trie.Trie
				)
				nodes := light.NewNodeSet()
				for i, request := range req.Reqs {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					// Re-open the helper trie only when the (type, index) pair changes.
					if auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx {
						auxTrie, lastType, lastIdx = nil, request.Type, request.TrieIdx

						var prefix string
						if root, prefix = h.getHelperTrie(request.Type, request.TrieIdx); root != (common.Hash{}) {
							auxTrie, _ = trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
						}
					}
					if request.AuxReq == auxRoot {
						var data []byte
						if root != (common.Hash{}) {
							data = root[:]
						}
						auxData = append(auxData, data)
						auxBytes += len(data)
					} else {
						if auxTrie != nil {
							auxTrie.Prove(request.Key, request.FromLevel, nodes)
						}
						if request.AuxReq != 0 {
							data := h.getAuxiliaryHeaders(request)
							auxData = append(auxData, data)
							auxBytes += len(data)
						}
					}
					if nodes.DataSize()+auxBytes >= softResponseLimit {
						break
					}
				}
				reply := p.replyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutHelperTriePacketsMeter.Mark(1)
					miscOutHelperTrieTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeHelperTrieTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}

	case SendTxV2Msg:
		p.Log().Trace("Received new transactions")
		if metrics.EnabledExpensive {
			miscInTxsPacketsMeter.Mark(1)
			miscInTxsTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Txs   []*types.Transaction
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		reqCnt := len(req.Txs)
		if accept(req.ReqID, uint64(reqCnt), MaxTxSend) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				stats := make([]light.TxStatus, len(req.Txs))
				for i, tx := range req.Txs {
					// NOTE(review): this abort path returns without calling
					// sendResponse, unlike every other handler — verify the
					// flow-control buffer is still settled correctly.
					if i != 0 && !task.waitOrStop() {
						return
					}
					hash := tx.Hash()
					stats[i] = h.txStatus(hash)
					if stats[i].Status == core.TxStatusUnknown {
						addFn := h.txpool.AddRemotes
						// Add txs synchronously for testing purpose
						if h.addTxsSync {
							addFn = h.txpool.AddRemotesSync
						}
						if errs := addFn([]*types.Transaction{tx}); errs[0] != nil {
							stats[i].Error = errs[0].Error()
							continue
						}
						stats[i] = h.txStatus(hash)
					}
				}
				reply := p.replyTxStatus(req.ReqID, stats)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutTxsPacketsMeter.Mark(1)
					miscOutTxsTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeTxTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}

	case GetTxStatusMsg:
		p.Log().Trace("Received transaction status query request")
		if metrics.EnabledExpensive {
			miscInTxStatusPacketsMeter.Mark(1)
			miscInTxStatusTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID  uint64
			Hashes []common.Hash
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		reqCnt := len(req.Hashes)
		if accept(req.ReqID, uint64(reqCnt), MaxTxStatus) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				stats := make([]light.TxStatus, len(req.Hashes))
				for i, hash := range req.Hashes {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					stats[i] = h.txStatus(hash)
				}
				reply := p.replyTxStatus(req.ReqID, stats)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutTxStatusPacketsMeter.Mark(1)
					miscOutTxStatusTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeTxStatusTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}

	default:
		p.Log().Trace("Received invalid message", "code", msg.Code)
		clientErrorMeter.Mark(1)
		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
	}
	// If the client has made too much invalid request(e.g. request a non-existent data),
	// reject them to prevent SPAM attack.
	if p.getInvalid() > maxRequestErrors {
		clientErrorMeter.Mark(1)
		return errTooManyInvalidRequest
	}
	return nil
}

// getAccount retrieves an account from the state based on root.
func (h *serverHandler) getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) {
	trie, err := trie.New(root, triedb)
	if err != nil {
		return state.Account{}, err
	}
	blob, err := trie.TryGet(hash[:])
	if err != nil {
		return state.Account{}, err
	}
	var account state.Account
	if err = rlp.DecodeBytes(blob, &account); err != nil {
		return state.Account{}, err
	}
	return account, nil
}

// getHelperTrie returns the post-processed trie root for the given trie ID and section index
func (h *serverHandler) getHelperTrie(typ uint, index uint64) (common.Hash, string) {
	switch typ {
	case htCanonical:
		sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.ChtSize-1)
		return light.GetChtRoot(h.chainDb, index, sectionHead), light.ChtTablePrefix
	case htBloomBits:
		sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.BloomTrieSize-1)
		return light.GetBloomTrieRoot(h.chainDb, index, sectionHead), light.BloomTrieTablePrefix
	}
	return common.Hash{}, ""
}

// getAuxiliaryHeaders returns requested auxiliary headers for the CHT request.
888 func (h *serverHandler) getAuxiliaryHeaders(req HelperTrieReq) []byte { 889 if req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8 { 890 blockNum := binary.BigEndian.Uint64(req.Key) 891 hash := rawdb.ReadCanonicalHash(h.chainDb, blockNum) 892 return rawdb.ReadHeaderRLP(h.chainDb, hash, blockNum) 893 } 894 return nil 895 } 896 897 // txStatus returns the status of a specified transaction. 898 func (h *serverHandler) txStatus(hash common.Hash) light.TxStatus { 899 var stat light.TxStatus 900 // Looking the transaction in txpool first. 901 stat.Status = h.txpool.Status([]common.Hash{hash})[0] 902 903 // If the transaction is unknown to the pool, try looking it up locally. 904 if stat.Status == core.TxStatusUnknown { 905 lookup := h.blockchain.GetTransactionLookup(hash) 906 if lookup != nil { 907 stat.Status = core.TxStatusIncluded 908 stat.Lookup = lookup 909 } 910 } 911 return stat 912 } 913 914 // broadcastHeaders broadcasts new block information to all connected light 915 // clients. According to the agreement between client and server, server should 916 // only broadcast new announcement if the total difficulty is higher than the 917 // last one. Besides server will add the signature if client requires. 
918 func (h *serverHandler) broadcastHeaders() { 919 defer h.wg.Done() 920 921 headCh := make(chan core.ChainHeadEvent, 10) 922 headSub := h.blockchain.SubscribeChainHeadEvent(headCh) 923 defer headSub.Unsubscribe() 924 925 var ( 926 lastHead *types.Header 927 lastTd = common.Big0 928 ) 929 for { 930 select { 931 case ev := <-headCh: 932 peers := h.server.peers.allPeers() 933 if len(peers) == 0 { 934 continue 935 } 936 header := ev.Block.Header() 937 hash, number := header.Hash(), header.Number.Uint64() 938 td := h.blockchain.GetTd(hash, number) 939 if td == nil || td.Cmp(lastTd) <= 0 { 940 continue 941 } 942 var reorg uint64 943 if lastHead != nil { 944 reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(h.chainDb, header, lastHead).Number.Uint64() 945 } 946 lastHead, lastTd = header, td 947 948 log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg) 949 var ( 950 signed bool 951 signedAnnounce announceData 952 ) 953 announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg} 954 for _, p := range peers { 955 p := p 956 switch p.announceType { 957 case announceTypeSimple: 958 if !p.queueSend(func() { p.sendAnnounce(announce) }) { 959 log.Debug("Drop announcement because queue is full", "number", number, "hash", hash) 960 } 961 case announceTypeSigned: 962 if !signed { 963 signedAnnounce = announce 964 signedAnnounce.sign(h.server.privateKey) 965 signed = true 966 } 967 if !p.queueSend(func() { p.sendAnnounce(signedAnnounce) }) { 968 log.Debug("Drop announcement because queue is full", "number", number, "hash", hash) 969 } 970 } 971 } 972 case <-h.closeCh: 973 return 974 } 975 } 976 }