github.com/linapex/ethereum-dpos-chinese@v0.0.0-20190316121959-b78b3a4a1ece/swarm/network/stream/stream.go

//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 12:09:48</date>
//</624342676082266112>

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	opentracing "github.com/opentracing/opentracing-go"
)

const (
	Low uint8 = iota
	Mid
	High
	Top
	PriorityQueue    // number of priority queues - Low, Mid, High, Top
	PriorityQueueCap = 32 // queue capacity
	HashSize         = 32
)

// Registry holds the constructors for outgoing (Server) and incoming (Client)
// streamers and tracks the set of connected streaming peers.
type Registry struct {
	api            *API
	addr           *network.BzzAddr
	skipCheck      bool
	clientMu       sync.RWMutex
	serverMu       sync.RWMutex
	peersMu        sync.RWMutex
	serverFuncs    map[string]func(*Peer, string, bool) (Server, error)
	clientFuncs    map[string]func(*Peer, string, bool) (Client, error)
	peers          map[discover.NodeID]*Peer
	delivery       *Delivery
	intervalsStore state.Store
	doRetrieve     bool
}

// RegistryOptions holds optional values for the NewRegistry constructor.
type RegistryOptions struct {
	SkipCheck       bool
	DoSync          bool
	DoRetrieve      bool
	SyncUpdateDelay time.Duration
}

// NewRegistry is the Streamer constructor.
func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, intervalsStore state.Store, options *RegistryOptions) *Registry {
	if options == nil {
		options = &RegistryOptions{}
	}
	if options.SyncUpdateDelay <= 0 {
		options.SyncUpdateDelay = 15 * time.Second
	}
	streamer := &Registry{
		addr:           addr,
		skipCheck:      options.SkipCheck,
		serverFuncs:    make(map[string]func(*Peer, string, bool) (Server, error)),
		clientFuncs:    make(map[string]func(*Peer, string, bool) (Client, error)),
		peers:          make(map[discover.NodeID]*Peer),
		delivery:       delivery,
		intervalsStore: intervalsStore,
		doRetrieve:     options.DoRetrieve,
	}
	streamer.api = NewAPI(streamer)
	delivery.getPeer = streamer.getPeer
	streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, _ bool) (Server, error) {
		return NewSwarmChunkServer(delivery.db), nil
	})
	streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
		return NewSwarmSyncerClient(p, delivery.db, false, NewStream(swarmChunkServerStreamName, t, live))
	})
	RegisterSwarmSyncerServer(streamer, db)
	RegisterSwarmSyncerClient(streamer, db)

	if options.DoSync {
		// latestIntC ensures that
		//   - receiving from the in chan is not blocked by processing inside the for loop
		//   - the latest int value is delivered to the loop after the processing is done
		// In the context of NeighbourhoodDepthC: after syncing has been updated inside
		// the loop, we do not need to react to intermediate depth changes, only to the
		// latest one.
		latestIntC := func(in <-chan int) <-chan int {
			out := make(chan int, 1)

			go func() {
				defer close(out)

				for i := range in {
					select {
					case <-out:
					default:
					}
					out <- i
				}
			}()

			return out
		}

		go func() {
			// wait for the kademlia table to be healthy
			time.Sleep(options.SyncUpdateDelay)
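
			// Editorial note (added comment, not in the original source): the loop
			// below debounces sync subscription updates. Every kademlia neighbourhood
			// depth change arms a SyncUpdateDelay timer that is reset while new peers
			// keep arriving, with a 3 minute hard cap, and updateSyncing is called
			// once the network settles or the cap fires.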
			kad := streamer.delivery.overlay.(*network.Kademlia)
			depthC := latestIntC(kad.NeighbourhoodDepthC())
			addressBookSizeC := latestIntC(kad.AddrCountC())

			// initial requests for syncing subscriptions to peers
			streamer.updateSyncing()

			for depth := range depthC {
				log.Debug("Kademlia neighbourhood depth change", "depth", depth)

				// Prevent too early sync subscriptions by waiting until there are no
				// new peers connecting. Sync streams are updated only after no peers
				// have connected for at least SyncUpdateDelay.
				timer := time.NewTimer(options.SyncUpdateDelay)
				// Hard limit on the sync update delay, preventing long delays
				// on a very dynamic network.
				maxTimer := time.NewTimer(3 * time.Minute)
			loop:
				for {
					select {
					case <-maxTimer.C:
						// force a syncing update when the max timer expires
						log.Trace("Sync subscriptions update on hard timeout")
						// request syncing subscriptions to new peers
						streamer.updateSyncing()
						break loop
					case <-timer.C:
						// start syncing, as no new peers have been added to kademlia
						// for some time
						log.Trace("Sync subscriptions update")
						// request syncing subscriptions to new peers
						streamer.updateSyncing()
						break loop
					case size := <-addressBookSizeC:
						log.Trace("Kademlia address book size changed on depth change", "size", size)
						// new peers have been added to kademlia,
						// reset the timer to prevent early sync subscriptions
						if !timer.Stop() {
							<-timer.C
						}
						timer.Reset(options.SyncUpdateDelay)
					}
				}
				timer.Stop()
				maxTimer.Stop()
			}
		}()
	}

	return streamer
}

// RegisterClientFunc registers an incoming streamer constructor.
func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
	r.clientMu.Lock()
	defer r.clientMu.Unlock()

	r.clientFuncs[stream] = f
}

// RegisterServerFunc registers an outgoing streamer constructor.
func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) {
	r.serverMu.Lock()
	defer r.serverMu.Unlock()

	r.serverFuncs[stream] = f
}

// GetClientFunc is the accessor for incoming streamer constructors.
func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) {
	r.clientMu.RLock()
	defer r.clientMu.RUnlock()

	f := r.clientFuncs[stream]
	if f == nil {
		return nil, fmt.Errorf("stream %v not registered", stream)
	}
	return f, nil
}

// GetServerFunc is the accessor for outgoing streamer constructors.
func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) {
	r.serverMu.RLock()
	defer r.serverMu.RUnlock()

	f := r.serverFuncs[stream]
	if f == nil {
		return nil, fmt.Errorf("stream %v not registered", stream)
	}
	return f, nil
}

func (r *Registry) RequestSubscription(peerId discover.NodeID, s Stream, h *Range, prio uint8) error {
	// check if the stream is registered
	if _, err := r.GetServerFunc(s.Name); err != nil {
		return err
	}

	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	if _, err := peer.getServer(s); err != nil {
		if e, ok := err.(*notFoundError); ok && e.t == "server" {
			// request the subscription only if the server for this stream is not yet created
			log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h)
			return peer.Send(context.TODO(), &RequestSubscriptionMsg{
				Stream:   s,
				History:  h,
				Priority: prio,
			})
		}
		return err
	}
	log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h)
	return nil
}

// Subscribe initiates the streamer on the client side: it registers client
// params for the requested stream (and for its history stream, if any) and
// sends a SubscribeMsg to the peer.
func (r *Registry) Subscribe(peerId discover.NodeID, s Stream, h *Range, priority uint8) error {
	// check if the stream is registered
	if _, err := r.GetClientFunc(s.Name); err != nil {
		return err
	}

	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	var to uint64
	if !s.Live && h != nil {
		to = h.To
	}

	err := peer.setClientParams(s, newClientParams(priority, to))
	if err != nil {
		return err
	}

	if s.Live && h != nil {
		if err := peer.setClientParams(
			getHistoryStream(s),
			newClientParams(getHistoryPriority(priority), h.To),
		); err != nil {
			return err
		}
	}

	msg := &SubscribeMsg{
		Stream:   s,
		History:  h,
		Priority: priority,
	}
	log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)

	return peer.SendPriority(context.TODO(), msg, priority)
}
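
// Illustrative sketch (an editorial addition, not part of the original file):
// to receive both live updates and history for a stream, a caller passes a Live
// stream together with a *Range; Subscribe then registers the live client at
// the requested priority and the history client one priority level lower before
// sending the SubscribeMsg. Assuming registry is a *Registry from NewRegistry
// and peerID is the discover.NodeID of an already connected peer:
//
//	stream := NewStream("SYNC", FormatSyncBinKey(1), true)
//	if err := registry.Subscribe(peerID, stream, NewRange(0, 0), High); err != nil {
//		log.Error("subscribe failed", "err", err)
//	}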

func (r *Registry) Unsubscribe(peerId discover.NodeID, s Stream) error {
	peer := r.getPeer(peerId)
	if peer == nil {
		return fmt.Errorf("peer not found %v", peerId)
	}

	msg := &UnsubscribeMsg{
		Stream: s,
	}
	log.Debug("Unsubscribe ", "peer", peerId, "stream", s)

	if err := peer.Send(context.TODO(), msg); err != nil {
		return err
	}
	return peer.removeClient(s)
}

// Quit sends the QuitMsg to the peer to remove the
// stream peer client and terminate the streaming.
func (r *Registry) Quit(peerId discover.NodeID, s Stream) error {
	peer := r.getPeer(peerId)
	if peer == nil {
		log.Debug("stream quit: peer not found", "peer", peerId, "stream", s)
		// if the peer is not found, abort the request
		return nil
	}

	msg := &QuitMsg{
		Stream: s,
	}
	log.Debug("Quit ", "peer", peerId, "stream", s)

	return peer.Send(context.TODO(), msg)
}

func (r *Registry) Retrieve(ctx context.Context, chunk *storage.Chunk) error {
	var sp opentracing.Span
	ctx, sp = spancontext.StartSpan(
		ctx,
		"registry.retrieve")
	defer sp.Finish()

	return r.delivery.RequestFromPeers(ctx, chunk.Addr[:], r.skipCheck)
}

func (r *Registry) NodeInfo() interface{} {
	return nil
}

func (r *Registry) PeerInfo(id discover.NodeID) interface{} {
	return nil
}

func (r *Registry) Close() error {
	return r.intervalsStore.Close()
}

func (r *Registry) getPeer(peerId discover.NodeID) *Peer {
	r.peersMu.RLock()
	defer r.peersMu.RUnlock()

	return r.peers[peerId]
}

func (r *Registry) setPeer(peer *Peer) {
	r.peersMu.Lock()
	r.peers[peer.ID()] = peer
	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
	r.peersMu.Unlock()
}

func (r *Registry) deletePeer(peer *Peer) {
	r.peersMu.Lock()
	delete(r.peers, peer.ID())
	metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
	r.peersMu.Unlock()
}

func (r *Registry) peersCount() (c int) {
	r.peersMu.Lock()
	c = len(r.peers)
	r.peersMu.Unlock()
	return
}

// Run is the p2p protocol run function executed for every connected peer.
func (r *Registry) Run(p *network.BzzPeer) error {
	sp := NewPeer(p.Peer, r)
	r.setPeer(sp)
	defer r.deletePeer(sp)
	defer close(sp.quit)
	defer sp.close()

	if r.doRetrieve {
		err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", false), nil, Top)
		if err != nil {
			return err
		}
	}

	return sp.Run(sp.HandleMsg)
}

// updateSyncing subscribes to SYNC streams by iterating over the
// kademlia connections and bins. If existing SYNC streams are no
// longer required after the iteration, Quit requests are sent to
// the appropriate peers to remove them.
func (r *Registry) updateSyncing() {
	// panics if the overlay is not Kademlia
	kad := r.delivery.overlay.(*network.Kademlia)

	// map of all SYNC streams for all peers,
	// used at the end of the function to remove
	// servers that are no longer needed
	subs := make(map[discover.NodeID]map[Stream]struct{})
	r.peersMu.RLock()
	for id, peer := range r.peers {
		peer.serverMu.RLock()
		for stream := range peer.servers {
			if stream.Name == "SYNC" {
				if _, ok := subs[id]; !ok {
					subs[id] = make(map[Stream]struct{})
				}
				subs[id][stream] = struct{}{}
			}
		}
		peer.serverMu.RUnlock()
	}
	r.peersMu.RUnlock()

	// request subscriptions for all nodes and bins
	kad.EachBin(r.addr.Over(), pot.DefaultPof(256), 0, func(conn network.OverlayConn, bin int) bool {
		p := conn.(network.Peer)
		log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr.ID(), p.ID(), bin))

		// bin is always less than 256, so it is safe to convert it to uint8
		stream := NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
		if streams, ok := subs[p.ID()]; ok {
			// delete the live and history streams from the map, so they are not removed with a Quit request
			delete(streams, stream)
			delete(streams, getHistoryStream(stream))
		}
		err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
		if err != nil {
			log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
			return false
		}
		return true
	})

	// remove SYNC servers that no longer need to be subscribed
	for id, streams := range subs {
		if len(streams) == 0 {
			continue
		}
		peer := r.getPeer(id)
		if peer == nil {
			continue
		}
		for stream := range streams {
			log.Debug("Remove sync server", "peer", id, "stream", stream)
			err := r.Quit(peer.ID(), stream)
			if err != nil && err != p2p.ErrShuttingDown {
				log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
			}
		}
	}
}

func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
	peer := protocols.NewPeer(p, rw, Spec)
	bzzPeer := network.NewBzzTestPeer(peer, r.addr)
	r.delivery.overlay.On(bzzPeer)
	defer r.delivery.overlay.Off(bzzPeer)
	return r.Run(bzzPeer)
}

// HandleMsg is the message handler that delegates incoming messages.
func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
	switch msg := msg.(type) {

	case *SubscribeMsg:
		return p.handleSubscribeMsg(ctx, msg)

	case *SubscribeErrorMsg:
		return p.handleSubscribeErrorMsg(msg)

	case *UnsubscribeMsg:
		return p.handleUnsubscribeMsg(msg)

	case *OfferedHashesMsg:
		return p.handleOfferedHashesMsg(ctx, msg)

	case *TakeoverProofMsg:
		return p.handleTakeoverProofMsg(ctx, msg)

	case *WantedHashesMsg:
		return p.handleWantedHashesMsg(ctx, msg)

	case *ChunkDeliveryMsg:
		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, msg)

	case *RetrieveRequestMsg:
		return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)

	case *RequestSubscriptionMsg:
		return p.handleRequestSubscription(ctx, msg)

	case *QuitMsg:
		return p.handleQuitMsg(msg)

	default:
		return fmt.Errorf("unknown message type: %T", msg)
	}
}

type server struct {
	Server
	stream       Stream
	priority     uint8
	currentBatch []byte
}

// Server is the interface for an outgoing peer streamer.
type Server interface {
	SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error)
	GetData(context.Context, []byte) ([]byte, error)
	Close()
}

type client struct {
	Client
	stream    Stream
	priority  uint8
	sessionAt uint64
	to        uint64
	next      chan error
	quit      chan struct{}

	intervalsKey   string
	intervalsStore state.Store
}

func peerStreamIntervalsKey(p *Peer, s Stream) string {
	return p.ID().String() + s.String()
}

func (c client) AddInterval(start, end uint64) (err error) {
	i := &intervals.Intervals{}
	err = c.intervalsStore.Get(c.intervalsKey, i)
	if err != nil {
		return err
	}
	i.Add(start, end)
	return c.intervalsStore.Put(c.intervalsKey, i)
}

func (c client) NextInterval() (start, end uint64, err error) {
	i := &intervals.Intervals{}
	err = c.intervalsStore.Get(c.intervalsKey, i)
	if err != nil {
		return 0, 0, err
	}
	start, end = i.Next()
	return start, end, nil
}

// Client is the interface for an incoming peer streamer.
type Client interface {
	NeedData(context.Context, []byte) func()
	BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error)
	Close()
}
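
// nextBatch returns the next range of hashes that the client should request
// for its stream. (Editorial summary, not part of the original source.) For a
// live stream it returns an open-ended range (from, 0); once a history client
// has caught up with the session start it asks for (from, c.to), or for
// (from, math.MaxUint64) when no upper bound is set; before that it resumes
// from the next unretrieved interval recorded in intervalsStore; and (0, 0)
// signals that the configured upper bound c.to has been reached.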
func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) {
	if c.to > 0 && from >= c.to {
		return 0, 0
	}
	if c.stream.Live {
		return from, 0
	} else if from >= c.sessionAt {
		if c.to > 0 {
			return from, c.to
		}
		return from, math.MaxUint64
	}
	nextFrom, nextTo, err := c.NextInterval()
	if err != nil {
		log.Error("next intervals", "stream", c.stream)
		return
	}
	if nextTo > c.to {
		nextTo = c.to
	}
	if nextTo == 0 {
		nextTo = c.sessionAt
	}
	return
}

func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error {
	if tf := c.BatchDone(req.Stream, req.From, hashes, req.Root); tf != nil {
		tp, err := tf()
		if err != nil {
			return err
		}
		if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil {
			return err
		}
		if c.to > 0 && tp.Takeover.End >= c.to {
			return p.streamer.Unsubscribe(p.Peer.ID(), req.Stream)
		}
		return nil
	}
	// record the interval of delivered hashes so it is not requested again
	if err := c.AddInterval(req.From, req.To); err != nil {
		return err
	}
	return nil
}

func (c *client) close() {
	select {
	case <-c.quit:
	default:
		close(c.quit)
	}
	c.Close()
}

// clientParams stores the priority and range for a stream client that has
// been requested via Subscribe but is not yet created.
type clientParams struct {
	priority uint8
	to       uint64
	// signals when the client is created
	clientCreatedC chan struct{}
}

func newClientParams(priority uint8, to uint64) *clientParams {
	return &clientParams{
		priority:       priority,
		to:             to,
		clientCreatedC: make(chan struct{}),
	}
}

func (c *clientParams) waitClient(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-c.clientCreatedC:
		return nil
	}
}

func (c *clientParams) clientCreated() {
	close(c.clientCreatedC)
}

// Spec is the spec of the streamer protocol.
var Spec = &protocols.Spec{
	Name:       "stream",
	Version:    5,
	MaxMsgSize: 10 * 1024 * 1024,
	Messages: []interface{}{
		UnsubscribeMsg{},
		OfferedHashesMsg{},
		WantedHashesMsg{},
		TakeoverProofMsg{},
		SubscribeMsg{},
		RetrieveRequestMsg{},
		ChunkDeliveryMsg{},
		SubscribeErrorMsg{},
		RequestSubscriptionMsg{},
		QuitMsg{},
	},
}

func (r *Registry) Protocols() []p2p.Protocol {
	return []p2p.Protocol{
		{
			Name:    Spec.Name,
			Version: Spec.Version,
			Length:  Spec.Length(),
			Run:     r.runProtocol,
			// NodeInfo: ,
			// PeerInfo: ,
		},
	}
}

func (r *Registry) APIs() []rpc.API {
	return []rpc.API{
		{
			Namespace: "stream",
			Version:   "3.0",
			Service:   r.api,
			Public:    true,
		},
	}
}

func (r *Registry) Start(server *p2p.Server) error {
	log.Info("Streamer started")
	return nil
}

func (r *Registry) Stop() error {
	return nil
}

type Range struct {
	From, To uint64
}

func NewRange(from, to uint64) *Range {
	return &Range{
		From: from,
		To:   to,
	}
}

func (r *Range) String() string {
	return fmt.Sprintf("%v-%v", r.From, r.To)
}

func getHistoryPriority(priority uint8) uint8 {
	if priority == 0 {
		return 0
	}
	return priority - 1
}

func getHistoryStream(s Stream) Stream {
	return NewStream(s.Name, s.Key, false)
}

type API struct {
	streamer *Registry
}

func NewAPI(r *Registry) *API {
	return &API{
		streamer: r,
	}
}

func (api *API) SubscribeStream(peerId discover.NodeID, s Stream, history *Range, priority uint8) error {
	return api.streamer.Subscribe(peerId, s, history, priority)
}
func (api *API) UnsubscribeStream(peerId discover.NodeID, s Stream) error {
	return api.streamer.Unsubscribe(peerId, s)
}
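
// subscribeSyncOverRPC is an illustrative sketch and an editorial addition, not
// part of the original file. It shows how the "stream" namespace registered in
// APIs() could be driven from another process over RPC. The websocket endpoint,
// the bin number, and the assumption that the node exposes the "stream" module
// on that endpoint are all hypothetical.
func subscribeSyncOverRPC(peerID discover.NodeID) error {
	client, err := rpc.Dial("ws://127.0.0.1:8546")
	if err != nil {
		return err
	}
	defer client.Close()
	// "stream_subscribeStream" resolves to API.SubscribeStream defined above.
	return client.Call(nil, "stream_subscribeStream",
		peerID, NewStream("SYNC", FormatSyncBinKey(1), true), NewRange(0, 0), High)
}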