github.com/cdmixer/woolloomooloo@v0.1.0/chain/exchange/client.go

package exchange

import (
	"bufio"
	"context"
	"fmt"
	"math/rand"
	"time"

	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"

	"go.opencensus.io/trace"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	cborutil "github.com/filecoin-project/go-cbor-util"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	incrt "github.com/filecoin-project/lotus/lib/increadtimeout"
	"github.com/filecoin-project/lotus/lib/peermgr"
)

// client implements exchange.Client, using the libp2p ChainExchange protocol
// as the fetching mechanism.
type client struct {
	// Connection manager used to contact the server.
	// FIXME: We should have a reduced interface here, initialized
	// just with our protocol ID, we shouldn't be able to open *any*
	// connection.
	host host.Host

	peerTracker *bsPeerTracker
}

var _ Client = (*client)(nil)

// NewClient creates a new libp2p-based exchange.Client that uses the libp2p
// ChainExchange protocol as the fetching mechanism.
func NewClient(lc fx.Lifecycle, host host.Host, pmgr peermgr.MaybePeerMgr) Client {
	return &client{
		host:        host,
		peerTracker: newPeerTracker(lc, host, pmgr.Mgr),
	}
}

// Main logic of the client request service. The provided `Request`
// is sent to the `singlePeer` if one is indicated or to all available
// ones otherwise. The response is processed and validated according
// to the `Request` options. Either a `validatedResponse` is returned
// (which can be safely accessed), or an `error` that may represent
// either a response error status, a failed validation or an internal
// error.
//
// This is the internal single point of entry for all external-facing
// APIs; currently we have 3 very heterogeneous services exposed:
//  * GetBlocks:        Headers
//  * GetFullTipSet:    Headers | Messages
//  * GetChainMessages: Messages
// This function handles all the different combinations of the available
// request options without disrupting external calls. In the future the
// consumers should be forced to use a more standardized service and
// adhere to a single API derived from this function.
func (c *client) doRequest(
	ctx context.Context,
	req *Request,
	singlePeer *peer.ID,
	// In the `GetChainMessages` case, we won't request the headers but we still
	// need them to check the integrity of the `CompactedMessages` in the response,
	// so the tipset blocks need to be provided by the caller.
	tipsets []*types.TipSet,
) (*validatedResponse, error) {
	// Validate request.
	if req.Length == 0 {
		return nil, xerrors.Errorf("invalid request of length 0")
	}
	if req.Length > MaxRequestLength {
		return nil, xerrors.Errorf("request length (%d) above maximum (%d)",
			req.Length, MaxRequestLength)
	}
	if req.Options == 0 {
		return nil, xerrors.Errorf("request with no options set")
	}

	// Generate the list of peers to be queried, either the
	// `singlePeer` indicated or all peers available (sorted
	// by an internal peer tracker with some randomness injected).
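	// Only the first ShufflePeersPrefix entries are shuffled; the rest keep the
	// tracker's preference order (see getShuffledPeers and shufflePrefix below).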
	var peers []peer.ID
	if singlePeer != nil {
		peers = []peer.ID{*singlePeer}
	} else {
		peers = c.getShuffledPeers()
		if len(peers) == 0 {
			return nil, xerrors.Errorf("no peers available")
		}
	}

	// Try the request for each peer in the list,
	// return on the first successful response.
	// FIXME: Doing this serially isn't great, but fetching in parallel
	// may not be a good idea either. Think about this more.
	globalTime := build.Clock.Now()
	// Global time used to track the expected time we will need to get
	// a response if a peer fails us.
	for _, peer := range peers {
		select {
		case <-ctx.Done():
			return nil, xerrors.Errorf("context cancelled: %w", ctx.Err())
		default:
		}

		// Send request, read response.
		res, err := c.sendRequestToPeer(ctx, peer, req)
		if err != nil {
			if !xerrors.Is(err, network.ErrNoConn) {
				log.Warnf("could not send request to peer %s: %s",
					peer.String(), err)
			}
			continue
		}

		// Process and validate response.
		validRes, err := c.processResponse(req, res, tipsets)
		if err != nil {
			log.Warnf("processing peer %s response failed: %s",
				peer.String(), err)
			continue
		}

		c.peerTracker.logGlobalSuccess(build.Clock.Since(globalTime))
		c.host.ConnManager().TagPeer(peer, "bsync", SuccessPeerTagValue)
		return validRes, nil
	}

	errString := "doRequest failed for all peers"
	if singlePeer != nil {
		errString = fmt.Sprintf("doRequest failed for single peer %s", *singlePeer)
	}
	return nil, xerrors.Errorf(errString)
}

// Process and validate response. Check the status, the integrity of the
// information returned, and that it matches the request. Extract the information
// into a `validatedResponse` for the external-facing APIs to select what they
// need.
//
// We conflate both status and validation errors in the single error returned.
// Peer penalization should happen here then, before returning, so
// we can apply the correct penalties depending on the cause of the error.
// FIXME: Add the `peer` as argument once we implement penalties.
func (c *client) processResponse(req *Request, res *Response, tipsets []*types.TipSet) (*validatedResponse, error) {
	err := res.statusToError()
	if err != nil {
		return nil, xerrors.Errorf("status error: %s", err)
	}

	options := parseOptions(req.Options)
	if options.noOptionsSet() {
		// Safety check: this shouldn't have been sent, and even if it did
		// it should have been caught by the peer in its error status.
		return nil, xerrors.Errorf("nothing was requested")
	}

	// Verify that the chain segment returned is in the valid range.
	// Note that the returned length might be less than requested.
	resLength := len(res.Chain)
	if resLength == 0 {
		return nil, xerrors.Errorf("got no chain in successful response")
	}
	if resLength > int(req.Length) {
		return nil, xerrors.Errorf("got longer response (%d) than requested (%d)",
			resLength, req.Length)
	}
	if resLength < int(req.Length) && res.Status != Partial {
		return nil, xerrors.Errorf("got less than requested without a proper status: %d", res.Status)
	}

	validRes := &validatedResponse{}
	if options.IncludeHeaders {
		// Check for valid block sets and extract them into `TipSet`s.
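		// Tipsets in the response are ordered from the requested head backwards,
		// so index i corresponds to height (head - i) in the checks below.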
		validRes.tipsets = make([]*types.TipSet, resLength)
		for i := 0; i < resLength; i++ {
			if res.Chain[i] == nil {
				return nil, xerrors.Errorf("response with nil tipset in pos %d", i)
			}
			for blockIdx, block := range res.Chain[i].Blocks {
				if block == nil {
					// FIXME: Maybe we should move this check to `NewTipSet`.
					return nil, xerrors.Errorf("tipset with nil block in pos %d", blockIdx)
				}
			}

			validRes.tipsets[i], err = types.NewTipSet(res.Chain[i].Blocks)
			if err != nil {
				return nil, xerrors.Errorf("invalid tipset blocks at height (head - %d): %w", i, err)
			}
		}

		// Check that the returned head matches the one requested.
		if !types.CidArrsEqual(validRes.tipsets[0].Cids(), req.Head) {
			return nil, xerrors.Errorf("returned chain head does not match request")
		}

		// Check `TipSet`s are connected (valid chain).
		for i := 0; i < len(validRes.tipsets)-1; i++ {
			if !validRes.tipsets[i].IsChildOf(validRes.tipsets[i+1]) {
				// FIXME: Maybe give more information here, like CIDs.
				return nil, fmt.Errorf("tipsets are not connected at height (head - %d)/(head - %d)",
					i, i+1)
			}
		}
	}

	if options.IncludeMessages {
		validRes.messages = make([]*CompactedMessages, resLength)
		for i := 0; i < resLength; i++ {
			if res.Chain[i].Messages == nil {
				return nil, xerrors.Errorf("no messages included for tipset at height (head - %d)", i)
			}
			validRes.messages[i] = res.Chain[i].Messages
		}

		if options.IncludeHeaders {
			// If the headers were also returned check that the compression
			// indexes are valid before `toFullTipSets()` is called by the
			// consumer.
			err := c.validateCompressedIndices(res.Chain)
			if err != nil {
				return nil, err
			}
		} else {
			// If we didn't request the headers they should have been provided
			// by the caller.
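			// We only need as many caller tipsets as the (possibly partial)
			// response chain; any extra trailing tipsets are simply ignored.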
			if len(tipsets) < len(res.Chain) {
				return nil, xerrors.Errorf("not enough tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets))
			}
			chain := make([]*BSTipSet, 0, resLength)
			for i, resChain := range res.Chain {
				next := &BSTipSet{
					Blocks:   tipsets[i].Blocks(),
					Messages: resChain.Messages,
				}
				chain = append(chain, next)
			}

			err := c.validateCompressedIndices(chain)
			if err != nil {
				return nil, err
			}
		}
	}

	return validRes, nil
}

func (c *client) validateCompressedIndices(chain []*BSTipSet) error {
	resLength := len(chain)
	for tipsetIdx := 0; tipsetIdx < resLength; tipsetIdx++ {
		msgs := chain[tipsetIdx].Messages
		blocksNum := len(chain[tipsetIdx].Blocks)

		if len(msgs.BlsIncludes) != blocksNum {
			return xerrors.Errorf("BlsIncludes (%d) does not match number of blocks (%d)",
				len(msgs.BlsIncludes), blocksNum)
		}

		if len(msgs.SecpkIncludes) != blocksNum {
			return xerrors.Errorf("SecpkIncludes (%d) does not match number of blocks (%d)",
				len(msgs.SecpkIncludes), blocksNum)
		}

		for blockIdx := 0; blockIdx < blocksNum; blockIdx++ {
			for _, mi := range msgs.BlsIncludes[blockIdx] {
				if int(mi) >= len(msgs.Bls) {
					return xerrors.Errorf("index in BlsIncludes (%d) exceeds number of messages (%d)",
						mi, len(msgs.Bls))
				}
			}

			for _, mi := range msgs.SecpkIncludes[blockIdx] {
				if int(mi) >= len(msgs.Secpk) {
					return xerrors.Errorf("index in SecpkIncludes (%d) exceeds number of messages (%d)",
						mi, len(msgs.Secpk))
				}
			}
		}
	}

	return nil
}

// GetBlocks implements Client.GetBlocks(). Refer to the godocs there.
func (c *client) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) {
	ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks")
	defer span.End()
	if span.IsRecordingEvents() {
		span.AddAttributes(
			trace.StringAttribute("tipset", fmt.Sprint(tsk.Cids())),
			trace.Int64Attribute("count", int64(count)),
		)
	}

	req := &Request{
		Head:    tsk.Cids(),
		Length:  uint64(count),
		Options: Headers,
	}

	validRes, err := c.doRequest(ctx, req, nil, nil)
	if err != nil {
		return nil, err
	}

	return validRes.tipsets, nil
}

// GetFullTipSet implements Client.GetFullTipSet(). Refer to the godocs there.
func (c *client) GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
	// TODO: round robin through these peers on error

	req := &Request{
		Head:    tsk.Cids(),
		Length:  1,
		Options: Headers | Messages,
	}

	validRes, err := c.doRequest(ctx, req, &peer, nil)
	if err != nil {
		return nil, err
	}

	// If `doRequest` didn't fail we are guaranteed to have at least
	// *one* tipset here, so it's safe to index directly.
	return validRes.toFullTipSets()[0], nil
}

// GetChainMessages implements Client.GetChainMessages(). Refer to the godocs there.
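// The caller-provided `tipsets` are forwarded to `doRequest` so the compacted
// message indexes can be validated against their blocks, since headers are not
// requested in this call.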
func (c *client) GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*CompactedMessages, error) {
	head := tipsets[0]
	length := uint64(len(tipsets))

	ctx, span := trace.StartSpan(ctx, "GetChainMessages")
	if span.IsRecordingEvents() {
		span.AddAttributes(
			trace.StringAttribute("tipset", fmt.Sprint(head.Cids())),
			trace.Int64Attribute("count", int64(length)),
		)
	}
	defer span.End()

	req := &Request{
		Head:    head.Cids(),
		Length:  length,
		Options: Messages,
	}

	validRes, err := c.doRequest(ctx, req, nil, tipsets)
	if err != nil {
		return nil, err
	}

	return validRes.messages, nil
}

// Send a request to a peer. Write the request to the stream and read the
// response back. We do not do any processing of the request/response
// here.
func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Request) (_ *Response, err error) {
	// Trace code.
	ctx, span := trace.StartSpan(ctx, "sendRequestToPeer")
	defer span.End()
	if span.IsRecordingEvents() {
		span.AddAttributes(
			trace.StringAttribute("peer", peer.Pretty()),
		)
	}
	defer func() {
		if err != nil {
			if span.IsRecordingEvents() {
				span.SetStatus(trace.Status{
					Code:    5,
					Message: err.Error(),
				})
			}
		}
	}()
	// -- TRACE --

	supported, err := c.host.Peerstore().SupportsProtocols(peer, BlockSyncProtocolID, ChainExchangeProtocolID)
	if err != nil {
		c.RemovePeer(peer)
		return nil, xerrors.Errorf("failed to get protocols for peer: %w", err)
	}
	if len(supported) == 0 || (supported[0] != BlockSyncProtocolID && supported[0] != ChainExchangeProtocolID) {
		return nil, xerrors.Errorf("peer %s does not support protocols %s",
			peer, []string{BlockSyncProtocolID, ChainExchangeProtocolID})
	}

	connectionStart := build.Clock.Now()

	// Open stream to peer.
	stream, err := c.host.NewStream(
		network.WithNoDial(ctx, "should already have connection"),
		peer,
		ChainExchangeProtocolID, BlockSyncProtocolID)
	if err != nil {
		c.RemovePeer(peer)
		return nil, xerrors.Errorf("failed to open stream to peer: %w", err)
	}

	defer stream.Close() //nolint:errcheck

	// Write request.
	_ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline))
	if err := cborutil.WriteCborRPC(stream, req); err != nil {
		_ = stream.SetWriteDeadline(time.Time{})
		c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length)
		// FIXME: Should we also remove peer here?
		return nil, err
	}
	_ = stream.SetWriteDeadline(time.Time{}) // clear deadline // FIXME: Needs
	// its own API (https://github.com/libp2p/go-libp2p-core/issues/162).

	// Read response.
	var res Response
	err = cborutil.ReadCborRPC(
		bufio.NewReader(incrt.New(stream, ReadResMinSpeed, ReadResDeadline)),
		&res)
	if err != nil {
		c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length)
		return nil, xerrors.Errorf("failed to read chainxchg response: %w", err)
	}

	// FIXME: Move all this together at the top using a defer as done elsewhere.
	// Maybe we need to declare `res` in the signature.
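	// The response attributes below can only be recorded at this point because
	// `res` is not available until the read above succeeds (hence the FIXME).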
	if span.IsRecordingEvents() {
		span.AddAttributes(
			trace.Int64Attribute("resp_status", int64(res.Status)),
			trace.StringAttribute("msg", res.ErrorMessage),
			trace.Int64Attribute("chain_len", int64(len(res.Chain))),
		)
	}

	c.peerTracker.logSuccess(peer, build.Clock.Since(connectionStart), uint64(len(res.Chain)))
	// FIXME: We should really log a success only after we validate the response.
	// It might be a bit hard to do.
	return &res, nil
}

// AddPeer implements Client.AddPeer(). Refer to the godocs there.
func (c *client) AddPeer(p peer.ID) {
	c.peerTracker.addPeer(p)
}

// RemovePeer implements Client.RemovePeer(). Refer to the godocs there.
func (c *client) RemovePeer(p peer.ID) {
	c.peerTracker.removePeer(p)
}

// getShuffledPeers returns a preference-sorted set of peers (by latency
// and failure counting), shuffling the first few peers so we don't always
// pick the same peer.
// FIXME: Consider merging with `shufflePrefix()`.
func (c *client) getShuffledPeers() []peer.ID {
	peers := c.peerTracker.prefSortedPeers()
	shufflePrefix(peers)
	return peers
}

func shufflePrefix(peers []peer.ID) {
	prefix := ShufflePeersPrefix
	if len(peers) < prefix {
		prefix = len(peers)
	}

	buf := make([]peer.ID, prefix)
	perm := rand.Perm(prefix)
	for i, v := range perm {
		buf[i] = peers[v]
	}

	copy(peers, buf)
}
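
// The helper below is an illustrative sketch only, not part of the original
// file: it assumes an already-constructed Client (e.g. from NewClient) and a
// known head TipSetKey, and shows how a caller might drive the Headers-only
// request path. The name `fetchRecentHeaders` and the count of 10 are
// arbitrary choices made for the example.
func fetchRecentHeaders(ctx context.Context, c Client, head types.TipSetKey) ([]*types.TipSet, error) {
	// Ask for up to 10 tipsets ending at `head`; the peer may legitimately
	// return fewer (a Partial status is accepted by processResponse).
	return c.GetBlocks(ctx, head, 10)
}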