github.com/yinchengtsinghua/golang-Eos-dpos-Ethereum@v0.0.0-20190121132951-92cc4225ed8e/swarm/network/stream/messages.go

//This source code was translated, analyzed and modified by Yin Cheng of Tsinghua University.
//Yin Cheng's QQ: 77025077
//Yin Cheng's WeChat: 18510341407
//Yin Cheng's QQ group: 721929980
//Yin Cheng's email: yinc13@mails.tsinghua.edu.cn
//Yin Cheng graduated from Tsinghua University and is a Microsoft MVP in the blockchain field.
//https://mvp.microsoft.com/zh-cn/PublicProfile/4033620

package stream

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	"github.com/ethereum/go-ethereum/swarm/storage"
	opentracing "github.com/opentracing/opentracing-go"
)

// Stream identifies a data stream by name, key and live/history mode.
type Stream struct {
	// Name identifies the Client and Server functions registered for the stream.
	Name string
	// Key is the name of the specific stream data.
	Key string
	// Live is true if the stream delivers only new data appearing after
	// subscription; false for a history (sync) stream.
	Live bool
}

func NewStream(name string, key string, live bool) Stream {
	return Stream{
		Name: name,
		Key:  key,
		Live: live,
	}
}

// String returns the stream in the form "<name>|<key>|<l|h>", where the last
// segment marks the stream as live ("l") or history ("h").
func (s Stream) String() string {
	t := "h"
	if s.Live {
		t = "l"
	}
	return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
}

// SubscribeMsg is the protocol message a peer sends to subscribe to a stream.
type SubscribeMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8 // delivery queue priority
}

// RequestSubscriptionMsg is the protocol message a peer sends to ask the
// remote peer to subscribe to a stream served by this node.
type RequestSubscriptionMsg struct {
	Stream   Stream
	History  *Range `rlp:"nil"`
	Priority uint8 // delivery queue priority
}

func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) {
	log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream))
	return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority)
}

func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) {
	metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)

	defer func() {
		if err != nil {
			if e := p.Send(context.TODO(), SubscribeErrorMsg{
				Error: err.Error(),
			}); e != nil {
				log.Error("send stream subscribe error message", "err", err)
			}
		}
	}()

	log.Debug("received subscription", "from", p.streamer.addr.ID(), "peer", p.ID(), "stream", req.Stream, "history", req.History)

	f, err := p.streamer.GetServerFunc(req.Stream.Name)
	if err != nil {
		return err
	}

	s, err := f(p, req.Stream.Key, req.Stream.Live)
	if err != nil {
		return err
	}
	os, err := p.setServer(req.Stream, s, req.Priority)
	if err != nil {
		return err
	}

	var from uint64
	var to uint64
	if !req.Stream.Live && req.History != nil {
		from = req.History.From
		to = req.History.To
	}

	go func() {
		if err := p.SendOfferedHashes(os, from, to); err != nil {
			log.Warn("SendOfferedHashes dropping peer", "err", err)
			p.Drop(err)
		}
	}()

	if req.Stream.Live && req.History != nil {
		// A live subscription carrying a history range additionally sets up a
		// separate history server for the same stream key.
		s, err := f(p, req.Stream.Key, false)
		if err != nil {
			return err
		}

		os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
		if err != nil {
			return err
		}
		go func() {
			if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
				log.Warn("SendOfferedHashes dropping peer", "err", err)
				p.Drop(err)
			}
		}()
	}

	return nil
}

type SubscribeErrorMsg struct {
	Error string
}
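// Illustrative sketch (not part of the original file): a live subscription
// that also carries a history range results in two servers on the receiving
// peer, one for the live stream and one for its history counterpart. The
// stream name, key, range bounds and priority value below are hypothetical:
//
//	req := &SubscribeMsg{
//		Stream:   NewStream("SYNC", "6", true), // rendered as "SYNC|6|l"
//		History:  &Range{From: 0, To: 1000},
//		Priority: 1,
//	}
//	// handleSubscribeMsg sets up a server for "SYNC|6|l" and, because
//	// History is non-nil, a second server for the history stream
//	// (Live=false, rendered as "SYNC|6|h") bounded by [0, 1000].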
func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
	return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
}

type UnsubscribeMsg struct {
	Stream Stream
}

func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
	return p.removeServer(req.Stream)
}

type QuitMsg struct {
	Stream Stream
}

func (p *Peer) handleQuitMsg(req *QuitMsg) error {
	return p.removeClient(req.Stream)
}

// OfferedHashesMsg is the protocol message sent by the upstream peer to offer
// a batch of hashes for an interval of a stream.
type OfferedHashesMsg struct {
	Stream         Stream // the stream the hashes belong to
	From, To       uint64 // the interval of the batch within the stream
	Hashes         []byte // concatenated chunk hashes, HashSize bytes each
	*HandoverProof        // proof of handover for the offered batch
}

// String pretty prints OfferedHashesMsg.
func (m OfferedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
}

// handleOfferedHashesMsg registers the offered batch with the stream client,
// determines which of the offered hashes are needed locally and replies with a
// WantedHashesMsg requesting them together with the next interval.
func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)

	var sp opentracing.Span
	ctx, sp = spancontext.StartSpan(
		ctx,
		"handle.offered.hashes")
	defer sp.Finish()

	c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
	if err != nil {
		return err
	}
	hashes := req.Hashes
	want, err := bv.New(len(hashes) / HashSize)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", len(hashes)/HashSize, err)
	}
	wg := sync.WaitGroup{}
	for i := 0; i < len(hashes); i += HashSize {
		hash := hashes[i : i+HashSize]

		if wait := c.NeedData(ctx, hash); wait != nil {
			want.Set(i/HashSize, true)
			wg.Add(1)
			// Wait until the chunk has been delivered before counting it done.
			go func(w func()) {
				w()
				wg.Done()
			}(wait)
		}
	}
	// Once all needed chunks of this batch have been delivered, signal batch
	// completion on c.next so the next WantedHashesMsg can be sent.
	go func() {
		wg.Wait()
		select {
		case c.next <- c.batchDone(p, req, hashes):
		case <-c.quit:
		}
	}()
	// For a live stream, remember where the session started so history
	// requests can be bounded accordingly.
	if c.stream.Live {
		c.sessionAt = req.From
	}
	from, to := c.nextBatch(req.To + 1)
	log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	if from == to {
		return nil
	}

	msg := &WantedHashesMsg{
		Stream: req.Stream,
		Want:   want.Bytes(),
		From:   from,
		To:     to,
	}
	go func() {
		select {
		case <-time.After(120 * time.Second):
			log.Warn("handleOfferedHashesMsg timeout, so dropping peer")
			p.Drop(errors.New("handle offered hashes timeout"))
			return
		case err := <-c.next:
			if err != nil {
				log.Warn("c.next dropping peer", "err", err)
				p.Drop(err)
				return
			}
		case <-c.quit:
			return
		}
		log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
		err := p.SendPriority(ctx, msg, c.priority)
		if err != nil {
			log.Warn("SendPriority err, so dropping peer", "err", err)
			p.Drop(err)
		}
	}()
	return nil
}

// WantedHashesMsg is the protocol message sent by the downstream peer to
// indicate which of the offered hashes it wants delivered.
type WantedHashesMsg struct {
	Stream   Stream
	Want     []byte // bitvector selecting which offered hashes are wanted
	From, To uint64 // next interval the peer is asked to offer
}

// String pretty prints WantedHashesMsg.
func (m WantedHashesMsg) String() string {
	return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
}
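// Illustrative sketch (not part of the original file): the Want bitvector is
// indexed by the position of each hash inside OfferedHashesMsg.Hashes. For a
// batch of three offered hashes where only the first and third are missing
// locally, the downstream peer's reply would look roughly like this (stream
// name, key and interval values are hypothetical):
//
//	want, _ := bv.New(3) // one bit per offered hash
//	want.Set(0, true)    // ask for hashes[0*HashSize : 1*HashSize]
//	want.Set(2, true)    // ask for hashes[2*HashSize : 3*HashSize]
//	reply := &WantedHashesMsg{
//		Stream: NewStream("SYNC", "6", false),
//		Want:   want.Bytes(),
//		From:   101, // next interval the upstream peer should offer
//		To:     200,
//	}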
// handleWantedHashesMsg offers the next batch of hashes for the stream and
// delivers the chunks of the current batch selected by the Want bitvector.
func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error {
	metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)

	log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
	s, err := p.getServer(req.Stream)
	if err != nil {
		return err
	}
	hashes := s.currentBatch
	// Offer the next batch concurrently with delivering the current one.
	go func() {
		if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
			log.Warn("SendOfferedHashes dropping peer", "err", err)
			p.Drop(err)
		}
	}()
	// Deliver every chunk whose bit is set in the Want bitvector.
	l := len(hashes) / HashSize

	log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
	want, err := bv.NewFromBytes(req.Want, l)
	if err != nil {
		return fmt.Errorf("error initialising bitvector of length %v: %v", l, err)
	}
	for i := 0; i < l; i++ {
		if want.Get(i) {
			metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)

			hash := hashes[i*HashSize : (i+1)*HashSize]
			data, err := s.GetData(ctx, hash)
			if err != nil {
				return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
			}
			chunk := storage.NewChunk(hash, nil)
			chunk.SData = data
			if length := len(chunk.SData); length < 9 {
				log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr)
			}
			if err := p.Deliver(ctx, chunk, s.priority); err != nil {
				return err
			}
		}
	}
	return nil
}

// Handover is a statement by the upstream peer that it hands over the stream
// section between Start and End, committed to by the Root hash.
type Handover struct {
	Stream     Stream // the stream the handover refers to
	Start, End uint64 // index range of the handed-over hashes
	Root       []byte // root hash over the handed-over segment
}

// HandoverProof is a Handover signed by the upstream peer.
type HandoverProof struct {
	Sig []byte // signature over the serialised Handover
	*Handover
}

// Takeover mirrors Handover on the downstream side: a statement that the
// section between Start and End has been stored by the downstream peer.
type Takeover Handover

// TakeoverProof is a Takeover signed by the downstream peer.
type TakeoverProof struct {
	Sig []byte // signature over the serialised Takeover
	*Takeover
}

// TakeoverProofMsg is the protocol message carrying a TakeoverProof.
type TakeoverProofMsg TakeoverProof

// String pretty prints TakeoverProofMsg.
func (m TakeoverProofMsg) String() string {
	return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
}

func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error {
	_, err := p.getServer(req.Stream)
	// The proof itself is not stored; only verify that a server exists for the stream.
	return err
}
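// Illustrative sketch (not part of the original file): how a TakeoverProofMsg
// could be assembled and rendered by its String method; sig and root are
// placeholder byte slices and the stream values are hypothetical:
//
//	m := TakeoverProofMsg{
//		Sig: sig,
//		Takeover: &Takeover{
//			Stream: NewStream("SYNC", "6", true),
//			Start:  1,
//			End:    1000,
//			Root:   root,
//		},
//	}
//	// m.String() -> "Stream: 'SYNC|6|l' [1-1000], Root: <root hex>, Sig: <sig hex>"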