github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/swarm/network/stream/peer.go (about) 1 // Copyright 2018 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package stream 18 19 import ( 20 "context" 21 "errors" 22 "fmt" 23 "sync" 24 "time" 25 26 "github.com/ethereum/go-ethereum/metrics" 27 "github.com/ethereum/go-ethereum/p2p/protocols" 28 "github.com/ethereum/go-ethereum/swarm/log" 29 pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue" 30 "github.com/ethereum/go-ethereum/swarm/network/stream/intervals" 31 "github.com/ethereum/go-ethereum/swarm/spancontext" 32 "github.com/ethereum/go-ethereum/swarm/state" 33 "github.com/ethereum/go-ethereum/swarm/storage" 34 "github.com/ethereum/go-ethereum/swarm/tracing" 35 opentracing "github.com/opentracing/opentracing-go" 36 ) 37 38 type notFoundError struct { 39 t string 40 s Stream 41 } 42 43 func newNotFoundError(t string, s Stream) *notFoundError { 44 return ¬FoundError{t: t, s: s} 45 } 46 47 func (e *notFoundError) Error() string { 48 return fmt.Sprintf("%s not found for stream %q", e.t, e.s) 49 } 50 51 // ErrMaxPeerServers will be returned if peer server limit is reached. 52 // It will be sent in the SubscribeErrorMsg. 
var ErrMaxPeerServers = errors.New("max peer servers")

// Peer is the Peer extension for the streaming protocol
type Peer struct {
	*protocols.Peer
	streamer *Registry
	// pq holds outgoing messages; it is drained by the pq.Run goroutine
	// started in NewPeer, which performs the actual sends.
	pq       *pq.PriorityQueue
	serverMu sync.RWMutex // protects servers
	clientMu sync.RWMutex // protects both clients and clientParams
	servers  map[Stream]*server
	clients  map[Stream]*client
	// clientParams map keeps required client arguments
	// that are set on Registry.Subscribe and used
	// on creating a new client in offered hashes handler.
	clientParams map[Stream]*clientParams
	// quit is closed to stop the monitoring goroutine and cancel the
	// priority-queue context (see NewPeer).
	quit chan struct{}
}

// WrappedPriorityMsg pairs an outgoing protocol message with the context
// it was submitted under, so the queued send can reuse that context.
type WrappedPriorityMsg struct {
	Context context.Context
	Msg     interface{}
}

// NewPeer is the constructor for Peer
func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
	p := &Peer{
		Peer:         peer,
		pq:           pq.New(int(PriorityQueue), PriorityQueueCap),
		streamer:     streamer,
		servers:      make(map[Stream]*server),
		clients:      make(map[Stream]*client),
		clientParams: make(map[Stream]*clientParams),
		quit:         make(chan struct{}),
	}
	ctx, cancel := context.WithCancel(context.Background())
	// Drain the priority queue: each dequeued WrappedPriorityMsg is sent
	// to the remote peer; a failed send drops the peer.
	go p.pq.Run(ctx, func(i interface{}) {
		wmsg := i.(WrappedPriorityMsg)
		err := p.Send(wmsg.Context, wmsg.Msg)
		if err != nil {
			log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
			p.Drop(err)
		}
	})

	// basic monitoring for pq contention
	// Every 5s, record the largest per-priority queue length and capacity
	// as per-peer gauges.
	// NOTE(review): pq.Queues is read here without synchronization while the
	// pq.Run goroutine above is concurrently using the queue — confirm the
	// priorityqueue package makes this safe for len/cap observation.
	go func(pq *pq.PriorityQueue) {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				var lenMaxi int
				var capMaxi int
				for k := range pq.Queues {
					if lenMaxi < len(pq.Queues[k]) {
						lenMaxi = len(pq.Queues[k])
					}

					if capMaxi < cap(pq.Queues[k]) {
						capMaxi = cap(pq.Queues[k])
					}
				}

				metrics.GetOrRegisterGauge(fmt.Sprintf("pq_len_%s", p.ID().TerminalString()), nil).Update(int64(lenMaxi))
				metrics.GetOrRegisterGauge(fmt.Sprintf("pq_cap_%s", p.ID().TerminalString()), nil).Update(int64(capMaxi))
			case <-p.quit:
				return
			}
		}
	}(p.pq)

	// Cancel the priority-queue context once the peer is quitting.
	go func() {
		<-p.quit

		cancel()
	}()
	return p
}

// Deliver sends a storeRequestMsg protocol message to the peer
// Depending on the `syncing` parameter we send different message types
func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8, syncing bool) error {
	var msg interface{}

	// NOTE(review): spanName is assembled below but never used afterwards —
	// looks like leftover tracing wiring; confirm before removing.
	spanName := "send.chunk.delivery"

	//we send different types of messages if delivery is for syncing or retrievals,
	//even if handling and content of the message are the same,
	//because swap accounting decides which messages need accounting based on the message type
	if syncing {
		msg = &ChunkDeliveryMsgSyncing{
			Addr:  chunk.Address(),
			SData: chunk.Data(),
		}
		spanName += ".syncing"
	} else {
		msg = &ChunkDeliveryMsgRetrieval{
			Addr:  chunk.Address(),
			SData: chunk.Data(),
		}
		spanName += ".retrieval"
	}

	// NOTE(review): string context key (go vet flags these) carrying a nil
	// value — effectively a no-op unless a consumer only checks key presence;
	// verify against whatever reads "stream_send_tag" elsewhere in the package.
	ctx = context.WithValue(ctx, "stream_send_tag", nil)
	return p.SendPriority(ctx, msg, priority)
}

// SendPriority sends message to the peer using the outgoing priority queue
func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error {
	defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now())
	tracing.StartSaveSpan(ctx)
	metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1)
	wmsg := WrappedPriorityMsg{
		Context: ctx,
		Msg:     msg,
	}
	err := p.pq.Push(wmsg, int(priority))
	// A contended (full) queue is treated as a misbehaving/overloaded
	// connection: drop the peer. The contention error is still returned.
	if err == pq.ErrContention {
		log.Warn("dropping peer on priority queue contention", "peer", p.ID())
		p.Drop(err)
	}
	return err
}

// SendOfferedHashes sends OfferedHashesMsg protocol msg
func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error {
	var sp opentracing.Span
	ctx, sp := spancontext.StartSpan(
		context.TODO(),
		"send.offered.hashes",
	)
	defer sp.Finish()

	// Ask the server for the next batch of hashes in [f, t).
	hashes, from, to, proof, err := s.setNextBatch(f, t)
	if err != nil {
		return err
	}
	// true only when quitting
	if len(hashes) == 0 {
		return nil
	}
	if proof == nil {
		proof = &HandoverProof{
			Handover: &Handover{},
		}
	}
	s.currentBatch = hashes
	msg := &OfferedHashesMsg{
		HandoverProof: proof,
		Hashes:        hashes,
		From:          from,
		To:            to,
		Stream:        s.stream,
	}
	log.Trace("Swarm syncer offer batch", "peer", p.ID(), "stream", s.stream, "len", len(hashes), "from", from, "to", to)
	// NOTE(review): string context key — see the same pattern in Deliver.
	ctx = context.WithValue(ctx, "stream_send_tag", "send.offered.hashes")
	return p.SendPriority(ctx, msg, s.priority)
}

// getServer returns the registered server for stream s, or a
// notFoundError if none is registered.
func (p *Peer) getServer(s Stream) (*server, error) {
	p.serverMu.RLock()
	defer p.serverMu.RUnlock()

	server := p.servers[s]
	if server == nil {
		return nil, newNotFoundError("server", s)
	}
	return server, nil
}

// setServer registers Server o for stream s with the given priority.
// It fails if a server for s already exists or if the configured
// maxPeerServers limit would be exceeded (ErrMaxPeerServers).
func (p *Peer) setServer(s Stream, o Server, priority uint8) (*server, error) {
	p.serverMu.Lock()
	defer p.serverMu.Unlock()

	if p.servers[s] != nil {
		return nil, fmt.Errorf("server %s already registered", s)
	}

	if p.streamer.maxPeerServers > 0 && len(p.servers) >= p.streamer.maxPeerServers {
		return nil, ErrMaxPeerServers
	}

	sessionIndex, err := o.SessionIndex()
	if err != nil {
		return nil, err
	}
	os := &server{
		Server:       o,
		stream:       s,
		priority:     priority,
		sessionIndex: sessionIndex,
	}
	p.servers[s] = os
	return os, nil
}

// removeServer closes and unregisters the server for stream s.
// Returns a notFoundError if no server is registered for s.
func (p *Peer) removeServer(s Stream) error {
	p.serverMu.Lock()
	defer p.serverMu.Unlock()

	server, ok := p.servers[s]
	if !ok {
		return newNotFoundError("server", s)
	}
	server.Close()
	delete(p.servers, s)
	return nil
}

// getClient returns the client for stream s, waiting (bounded by ctx) for
// its creation if client params have been set via Registry.Subscribe but
// the client has not been created yet by the offered-hashes handler.
func (p *Peer) getClient(ctx context.Context, s Stream) (c *client, err error) {
	var params *clientParams
	// First check under read lock: an existing client wins; otherwise
	// capture the pending params (if any) to wait on outside the lock.
	func() {
		p.clientMu.RLock()
		defer p.clientMu.RUnlock()

		c = p.clients[s]
		if c != nil {
			return
		}
		params = p.clientParams[s]
	}()
	if c != nil {
		return c, nil
	}

	if params != nil {
		//debug.PrintStack()
		// Block until the client is created (clientCreated is called in
		// getOrSetClient) or ctx is done.
		if err := params.waitClient(ctx); err != nil {
			return nil, err
		}
	}

	// Re-check after waiting (double-checked pattern).
	p.clientMu.RLock()
	defer p.clientMu.RUnlock()

	c = p.clients[s]
	if c != nil {
		return c, nil
	}
	return nil, newNotFoundError("client", s)
}

// getOrSetClient returns the existing client for stream s, or creates one
// from the registered client func and the pending clientParams, persisting
// a fresh intervals record starting at from. created reports whether a new
// client was made.
// NOTE(review): the `to` parameter is not referenced in this body — the
// client's `to` comes from clientParams; confirm whether it is vestigial.
func (p *Peer) getOrSetClient(s Stream, from, to uint64) (c *client, created bool, err error) {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	c = p.clients[s]
	if c != nil {
		return c, false, nil
	}

	f, err := p.streamer.GetClientFunc(s.Name)
	if err != nil {
		return nil, false, err
	}

	is, err := f(p, s.Key, s.Live)
	if err != nil {
		return nil, false, err
	}

	cp, err := p.getClientParams(s)
	if err != nil {
		return nil, false, err
	}
	// On success (named err is nil at return), drop the now-consumed
	// client params entry; failures are only logged.
	defer func() {
		if err == nil {
			if err := p.removeClientParams(s); err != nil {
				log.Error("stream set client: remove client params", "stream", s, "peer", p, "err", err)
			}
		}
	}()

	intervalsKey := peerStreamIntervalsKey(p, s)
	if s.Live {
		// try to find previous history and live intervals and merge live into history
		historyKey := peerStreamIntervalsKey(p, NewStream(s.Name, s.Key, false))
		historyIntervals := &intervals.Intervals{}
		err := p.streamer.intervalsStore.Get(historyKey, historyIntervals)
		switch err {
		case nil:
			liveIntervals := &intervals.Intervals{}
			err := p.streamer.intervalsStore.Get(intervalsKey, liveIntervals)
			switch err {
			case nil:
				historyIntervals.Merge(liveIntervals)
				if err := p.streamer.intervalsStore.Put(historyKey, historyIntervals); err != nil {
					log.Error("stream set client: put history intervals", "stream", s, "peer", p, "err", err)
				}
			case state.ErrNotFound:
				// no live intervals yet — nothing to merge
			default:
				log.Error("stream set client: get live intervals", "stream", s, "peer", p, "err", err)
			}
		case state.ErrNotFound:
			// no history intervals yet — nothing to merge into
		default:
			log.Error("stream set client: get history intervals", "stream", s, "peer", p, "err", err)
		}
	}

	// Reset (overwrite) the stream's intervals record to start at from.
	if err := p.streamer.intervalsStore.Put(intervalsKey, intervals.NewIntervals(from)); err != nil {
		return nil, false, err
	}

	next := make(chan error, 1)
	c = &client{
		Client:         is,
		stream:         s,
		priority:       cp.priority,
		to:             cp.to,
		next:           next,
		quit:           make(chan struct{}),
		intervalsStore: p.streamer.intervalsStore,
		intervalsKey:   intervalsKey,
	}
	p.clients[s] = c
	cp.clientCreated() // unblock all possible getClient calls that are waiting
	next <- nil        // this is to allow wantedKeysMsg before first batch arrives
	return c, true, nil
}

// removeClient closes and unregisters the client for stream s.
// Returns a notFoundError if no client is registered for s.
func (p *Peer) removeClient(s Stream) error {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	client, ok := p.clients[s]
	if !ok {
		return newNotFoundError("client", s)
	}
	client.close()
	delete(p.clients, s)
	return nil
}

// setClientParams stores pending client arguments for stream s, to be
// consumed when the client is created in getOrSetClient. It fails if a
// client or params entry for s already exists.
func (p *Peer) setClientParams(s Stream, params *clientParams) error {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	if p.clients[s] != nil {
		return fmt.Errorf("client %s already exists", s)
	}
	if p.clientParams[s] != nil {
		return fmt.Errorf("client params %s already set", s)
	}
	p.clientParams[s] = params
	return nil
}

// getClientParams returns the pending client params for stream s.
// NOTE(review): does not take clientMu itself — getOrSetClient calls it
// with the lock held; confirm no other caller reaches it unlocked.
func (p *Peer) getClientParams(s Stream) (*clientParams, error) {
	params := p.clientParams[s]
	if params == nil {
		return nil, fmt.Errorf("client params '%v' not provided to peer %v", s, p.ID())
	}
	return params, nil
}

// removeClientParams deletes the pending client params entry for stream s.
// NOTE(review): like getClientParams, relies on the caller holding clientMu.
func (p *Peer) removeClientParams(s Stream) error {
	_, ok := p.clientParams[s]
	if !ok {
		return newNotFoundError("client params", s)
	}
	delete(p.clientParams, s)
	return nil
}

// close shuts down all registered servers for this peer.
// NOTE(review): iterates p.servers without serverMu and does not close
// clients or the quit channel here — presumably the Registry handles those
// on teardown; verify against the caller.
func (p *Peer) close() {
	for _, s := range p.servers {
		s.Close()
	}
}