github.com/susy-go/susy-graviton@v0.0.0-20190614130430-36cddae42305/swarm/network/stream/peer.go

// Copyright 2018 The susy-graviton Authors
// This file is part of the susy-graviton library.
//
// The susy-graviton library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The susy-graviton library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the susy-graviton library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/susy-go/susy-graviton/metrics"
	"github.com/susy-go/susy-graviton/p2p/protocols"
	"github.com/susy-go/susy-graviton/swarm/log"
	pq "github.com/susy-go/susy-graviton/swarm/network/priorityqueue"
	"github.com/susy-go/susy-graviton/swarm/network/stream/intervals"
	"github.com/susy-go/susy-graviton/swarm/spancontext"
	"github.com/susy-go/susy-graviton/swarm/state"
	"github.com/susy-go/susy-graviton/swarm/storage"
	opentracing "github.com/opentracing/opentracing-go"
)

// notFoundError is returned when a server, client or client params
// entry is missing for a given stream.
type notFoundError struct {
	t string
	s Stream
}

func newNotFoundError(t string, s Stream) *notFoundError {
	return &notFoundError{t: t, s: s}
}

func (e *notFoundError) Error() string {
	return fmt.Sprintf("%s not found for stream %q", e.t, e.s)
}

// ErrMaxPeerServers will be returned if the peer server limit is reached.
// It will be sent in the SubscribeErrorMsg.
var ErrMaxPeerServers = errors.New("max peer servers")

// Peer is the Peer extension for the streaming protocol
type Peer struct {
	*protocols.Peer
	streamer *Registry
	pq       *pq.PriorityQueue
	serverMu sync.RWMutex
	clientMu sync.RWMutex // protects both clients and clientParams
	servers  map[Stream]*server
	clients  map[Stream]*client
	// clientParams map keeps required client arguments
	// that are set on Registry.Subscribe and used
	// on creating a new client in offered hashes handler.
	clientParams map[Stream]*clientParams
	quit         chan struct{}
	spans        sync.Map
}

// WrappedPriorityMsg wraps a protocol message together with the context it
// was enqueued under, so the priority queue consumer can trace and send it.
type WrappedPriorityMsg struct {
	Context context.Context
	Msg     interface{}
}

// NewPeer is the constructor for Peer
func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
	p := &Peer{
		Peer:         peer,
		pq:           pq.New(int(PriorityQueue), PriorityQueueCap),
		streamer:     streamer,
		servers:      make(map[Stream]*server),
		clients:      make(map[Stream]*client),
		clientParams: make(map[Stream]*clientParams),
		quit:         make(chan struct{}),
		spans:        sync.Map{},
	}
	ctx, cancel := context.WithCancel(context.Background())
	go p.pq.Run(ctx, func(i interface{}) {
		wmsg := i.(WrappedPriorityMsg)
		defer p.spans.Delete(wmsg.Context)
		sp, ok := p.spans.Load(wmsg.Context)
		if ok {
			defer sp.(opentracing.Span).Finish()
		}
		err := p.Send(wmsg.Context, wmsg.Msg)
		if err != nil {
			log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
			p.Drop(err)
		}
	})

	// basic monitoring for pq contention
	go func(pq *pq.PriorityQueue) {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				var maxLen int
				var maxCap int
				for k := range pq.Queues {
					if maxLen < len(pq.Queues[k]) {
						maxLen = len(pq.Queues[k])
					}
					if maxCap < cap(pq.Queues[k]) {
						maxCap = cap(pq.Queues[k])
					}
				}

				metrics.GetOrRegisterGauge(fmt.Sprintf("pq_len_%s", p.ID().TerminalString()), nil).Update(int64(maxLen))
				metrics.GetOrRegisterGauge(fmt.Sprintf("pq_cap_%s", p.ID().TerminalString()), nil).Update(int64(maxCap))
			case <-p.quit:
				return
			}
		}
	}(p.pq)

	go func() {
		<-p.quit
		cancel()
	}()
	return p
}
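// sendTopPriority is a hypothetical usage sketch, not part of the original
// file: it shows how a caller hands a message to the outgoing priority queue
// set up in NewPeer. Top is assumed to be the highest of the queue priority
// levels defined elsewhere in this package; an empty trace name would skip
// span creation in SendPriority.
func sendTopPriority(ctx context.Context, p *Peer, msg interface{}) error {
	// The span stored by SendPriority is finished by the queue consumer
	// goroutine right after the message is actually sent.
	return p.SendPriority(ctx, msg, Top, "send.example")
}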
// Deliver sends a chunk delivery protocol message to the peer.
// Depending on the `syncing` parameter we send different message types.
func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8, syncing bool) error {
	var msg interface{}

	spanName := "send.chunk.delivery"

	// we send different types of messages if delivery is for syncing or retrievals,
	// even if handling and content of the message are the same,
	// because swap accounting decides which messages need accounting based on the message type
	if syncing {
		msg = &ChunkDeliveryMsgSyncing{
			Addr:  chunk.Address(),
			SData: chunk.Data(),
		}
		spanName += ".syncing"
	} else {
		msg = &ChunkDeliveryMsgRetrieval{
			Addr:  chunk.Address(),
			SData: chunk.Data(),
		}
		spanName += ".retrieval"
	}

	return p.SendPriority(ctx, msg, priority, spanName)
}

// SendPriority sends a message to the peer using the outgoing priority queue
func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8, traceId string) error {
	defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now())
	metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1)
	if traceId != "" {
		var sp opentracing.Span
		ctx, sp = spancontext.StartSpan(
			ctx,
			traceId,
		)
		p.spans.Store(ctx, sp)
	}
	wmsg := WrappedPriorityMsg{
		Context: ctx,
		Msg:     msg,
	}
	err := p.pq.Push(wmsg, int(priority))
	if err == pq.ErrContention {
		log.Warn("dropping peer on priority queue contention", "peer", p.ID())
		p.Drop(err)
	}
	return err
}
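// deliverWithTimeout is a hypothetical sketch, not part of the original
// file: it attaches a deadline to the delivery context. The context travels
// inside WrappedPriorityMsg through the priority queue and is visible to the
// eventual Send call and its trace span. The five-second budget is an
// illustrative value, not a protocol constant.
func deliverWithTimeout(p *Peer, chunk storage.Chunk, priority uint8, syncing bool) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return p.Deliver(ctx, chunk, priority, syncing)
}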
// SendOfferedHashes sends an OfferedHashesMsg protocol message to the peer
func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error {
	ctx, sp := spancontext.StartSpan(
		context.TODO(),
		"send.offered.hashes")
	defer sp.Finish()

	hashes, from, to, proof, err := s.setNextBatch(f, t)
	if err != nil {
		return err
	}
	// true only when quitting
	if len(hashes) == 0 {
		return nil
	}
	if proof == nil {
		proof = &HandoverProof{
			Handover: &Handover{},
		}
	}
	s.currentBatch = hashes
	msg := &OfferedHashesMsg{
		HandoverProof: proof,
		Hashes:        hashes,
		From:          from,
		To:            to,
		Stream:        s.stream,
	}
	log.Trace("Swarm syncer offer batch", "peer", p.ID(), "stream", s.stream, "len", len(hashes), "from", from, "to", to)
	return p.SendPriority(ctx, msg, s.priority, "send.offered.hashes")
}

func (p *Peer) getServer(s Stream) (*server, error) {
	p.serverMu.RLock()
	defer p.serverMu.RUnlock()

	server := p.servers[s]
	if server == nil {
		return nil, newNotFoundError("server", s)
	}
	return server, nil
}

func (p *Peer) setServer(s Stream, o Server, priority uint8) (*server, error) {
	p.serverMu.Lock()
	defer p.serverMu.Unlock()

	if p.servers[s] != nil {
		return nil, fmt.Errorf("server %s already registered", s)
	}

	if p.streamer.maxPeerServers > 0 && len(p.servers) >= p.streamer.maxPeerServers {
		return nil, ErrMaxPeerServers
	}

	sessionIndex, err := o.SessionIndex()
	if err != nil {
		return nil, err
	}
	os := &server{
		Server:       o,
		stream:       s,
		priority:     priority,
		sessionIndex: sessionIndex,
	}
	p.servers[s] = os
	return os, nil
}

func (p *Peer) removeServer(s Stream) error {
	p.serverMu.Lock()
	defer p.serverMu.Unlock()

	server, ok := p.servers[s]
	if !ok {
		return newNotFoundError("server", s)
	}
	server.Close()
	delete(p.servers, s)
	return nil
}

func (p *Peer) getClient(ctx context.Context, s Stream) (c *client, err error) {
	var params *clientParams
	func() {
		p.clientMu.RLock()
		defer p.clientMu.RUnlock()

		c = p.clients[s]
		if c != nil {
			return
		}
		params = p.clientParams[s]
	}()
	if c != nil {
		return c, nil
	}

	if params != nil {
		// wait until the client is created by the offered hashes handler
		if err := params.waitClient(ctx); err != nil {
			return nil, err
		}
	}

	p.clientMu.RLock()
	defer p.clientMu.RUnlock()

	c = p.clients[s]
	if c != nil {
		return c, nil
	}
	return nil, newNotFoundError("client", s)
}
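// clientWithDeadline is a hypothetical sketch, not part of the original
// file: getClient blocks in params.waitClient until the offered hashes
// handler creates the client or the context is done, so callers that must
// not stall can bound the wait as shown. The duration is illustrative.
func clientWithDeadline(p *Peer, s Stream, d time.Duration) (*client, error) {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	return p.getClient(ctx, s)
}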
func (p *Peer) getOrSetClient(s Stream, from, to uint64) (c *client, created bool, err error) {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	c = p.clients[s]
	if c != nil {
		return c, false, nil
	}

	f, err := p.streamer.GetClientFunc(s.Name)
	if err != nil {
		return nil, false, err
	}

	is, err := f(p, s.Key, s.Live)
	if err != nil {
		return nil, false, err
	}

	cp, err := p.getClientParams(s)
	if err != nil {
		return nil, false, err
	}
	defer func() {
		if err == nil {
			if err := p.removeClientParams(s); err != nil {
				log.Error("stream set client: remove client params", "stream", s, "peer", p, "err", err)
			}
		}
	}()

	intervalsKey := peerStreamIntervalsKey(p, s)
	if s.Live {
		// try to find previous history and live intervals and merge live into history
		historyKey := peerStreamIntervalsKey(p, NewStream(s.Name, s.Key, false))
		historyIntervals := &intervals.Intervals{}
		err := p.streamer.intervalsStore.Get(historyKey, historyIntervals)
		switch err {
		case nil:
			liveIntervals := &intervals.Intervals{}
			err := p.streamer.intervalsStore.Get(intervalsKey, liveIntervals)
			switch err {
			case nil:
				historyIntervals.Merge(liveIntervals)
				if err := p.streamer.intervalsStore.Put(historyKey, historyIntervals); err != nil {
					log.Error("stream set client: put history intervals", "stream", s, "peer", p, "err", err)
				}
			case state.ErrNotFound:
			default:
				log.Error("stream set client: get live intervals", "stream", s, "peer", p, "err", err)
			}
		case state.ErrNotFound:
		default:
			log.Error("stream set client: get history intervals", "stream", s, "peer", p, "err", err)
		}
	}

	if err := p.streamer.intervalsStore.Put(intervalsKey, intervals.NewIntervals(from)); err != nil {
		return nil, false, err
	}

	next := make(chan error, 1)
	c = &client{
		Client:         is,
		stream:         s,
		priority:       cp.priority,
		to:             cp.to,
		next:           next,
		quit:           make(chan struct{}),
		intervalsStore: p.streamer.intervalsStore,
		intervalsKey:   intervalsKey,
	}
	p.clients[s] = c
	cp.clientCreated() // unblock all possible getClient calls that are waiting
	next <- nil        // this is to allow wantedKeysMsg before first batch arrives
	return c, true, nil
}

func (p *Peer) removeClient(s Stream) error {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	client, ok := p.clients[s]
	if !ok {
		return newNotFoundError("client", s)
	}
	client.close()
	delete(p.clients, s)
	return nil
}

func (p *Peer) setClientParams(s Stream, params *clientParams) error {
	p.clientMu.Lock()
	defer p.clientMu.Unlock()

	if p.clients[s] != nil {
		return fmt.Errorf("client %s already exists", s)
	}
	if p.clientParams[s] != nil {
		return fmt.Errorf("client params %s already set", s)
	}
	p.clientParams[s] = params
	return nil
}

func (p *Peer) getClientParams(s Stream) (*clientParams, error) {
	params := p.clientParams[s]
	if params == nil {
		return nil, fmt.Errorf("client params '%v' not provided to peer %v", s, p.ID())
	}
	return params, nil
}

func (p *Peer) removeClientParams(s Stream) error {
	_, ok := p.clientParams[s]
	if !ok {
		return newNotFoundError("client params", s)
	}
	delete(p.clientParams, s)
	return nil
}

func (p *Peer) close() {
	for _, s := range p.servers {
		s.Close()
	}
}
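// offerInitialBatch is a hypothetical sketch, not part of the original
// file: it shows the server-side sequence this file implies — register a
// Server implementation for the stream, then offer the first batch of
// hashes. Passing zero bounds is assumed to let setNextBatch pick the
// session-appropriate range.
func offerInitialBatch(p *Peer, s Stream, o Server, priority uint8) error {
	srv, err := p.setServer(s, o, priority)
	if err != nil {
		return err // e.g. ErrMaxPeerServers or duplicate registration
	}
	return p.SendOfferedHashes(srv, 0, 0)
}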