// Package p2ptest provides in-memory scaffolding for high-level P2P testing
// without real network I/O.
package p2ptest

import (
	"context"
	"math/rand"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/number571/tendermint/crypto"
	"github.com/number571/tendermint/crypto/gost512"
	"github.com/number571/tendermint/internal/p2p"
	"github.com/number571/tendermint/libs/log"
	"github.com/number571/tendermint/types"
)

// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
type Network struct {
	Nodes map[types.NodeID]*Node

	logger        log.Logger
	memoryNetwork *p2p.MemoryNetwork
}

// NetworkOptions is an argument structure to parameterize the
// MakeNetwork function.
type NetworkOptions struct {
	NumNodes   int
	BufferSize int
	NodeOpts   NodeOptions
}

// NodeOptions parameterizes a single node's peer manager limits (see MakeNode).
type NodeOptions struct {
	MaxPeers     uint16
	MaxConnected uint16
}

// setDefaults fills in zero-valued options with sane test defaults.
func (opts *NetworkOptions) setDefaults() {
	if opts.BufferSize == 0 {
		opts.BufferSize = 1
	}
}

// MakeNetwork creates a test network with the given number of nodes and
// connects them to each other.
func MakeNetwork(t *testing.T, opts NetworkOptions) *Network {
	opts.setDefaults()
	logger := log.TestingLogger()
	network := &Network{
		Nodes:         map[types.NodeID]*Node{},
		logger:        logger,
		memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize),
	}

	for i := 0; i < opts.NumNodes; i++ {
		node := network.MakeNode(t, opts.NodeOpts)
		network.Nodes[node.NodeID] = node
	}

	return network
}

// Start starts the network by setting up a list of node addresses to dial in
// addition to creating a peer update subscription for each node. Finally, all
// nodes are connected to each other.
func (n *Network) Start(t *testing.T) {
	// Set up a list of node addresses to dial, and a peer update subscription
	// for each node. The defers are function-scoped, so every subscription
	// stays open until Start returns.
	dialQueue := []p2p.NodeAddress{}
	subs := map[types.NodeID]*p2p.PeerUpdates{}
	for _, node := range n.Nodes {
		dialQueue = append(dialQueue, node.NodeAddress)
		subs[node.NodeID] = node.PeerManager.Subscribe()
		defer subs[node.NodeID].Close()
	}

	// For each node, dial the nodes that it still doesn't have a connection to
	// (either inbound or outbound), and wait for both sides to confirm the
	// connection via the subscriptions.
	for i, sourceAddress := range dialQueue {
		sourceNode := n.Nodes[sourceAddress.NodeID]
		sourceSub := subs[sourceAddress.NodeID]

		for _, targetAddress := range dialQueue[i+1:] { // nodes <i already connected
			targetNode := n.Nodes[targetAddress.NodeID]
			targetSub := subs[targetAddress.NodeID]
			added, err := sourceNode.PeerManager.Add(targetAddress)
			require.NoError(t, err)
			require.True(t, added)

			// Wait (up to 3s) for the dialer to report the peer as up.
			select {
			case peerUpdate := <-sourceSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: targetNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(3 * time.Second):
				require.Fail(t, "timed out waiting for peer", "%v dialing %v",
					sourceNode.NodeID, targetNode.NodeID)
			}

			// Wait (up to 3s) for the accepting side to report the same.
			select {
			case peerUpdate := <-targetSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: sourceNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(3 * time.Second):
				require.Fail(t, "timed out waiting for peer", "%v accepting %v",
					targetNode.NodeID, sourceNode.NodeID)
			}

			// Add the address to the target as well, so it's able to dial the
			// source back if that's even necessary.
			added, err = targetNode.PeerManager.Add(sourceAddress)
			require.NoError(t, err)
			require.True(t, added)
		}
	}
}

// NodeIDs returns the network's node IDs.
// Order is unspecified (map iteration order).
func (n *Network) NodeIDs() []types.NodeID {
	ids := []types.NodeID{}
	for id := range n.Nodes {
		ids = append(ids, id)
	}
	return ids
}

// MakeChannels makes a channel on all nodes and returns them, automatically
// doing error checks and cleanups.
func (n *Network) MakeChannels(
	t *testing.T,
	chDesc p2p.ChannelDescriptor,
	messageType proto.Message,
	size int,
) map[types.NodeID]*p2p.Channel {
	channels := map[types.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size)
	}
	return channels
}

// MakeChannelsNoCleanup makes a channel on all nodes and returns them,
// automatically doing error checks. The caller must ensure proper cleanup of
// all the channels.
func (n *Network) MakeChannelsNoCleanup(
	t *testing.T,
	chDesc p2p.ChannelDescriptor,
	messageType proto.Message,
	size int,
) map[types.NodeID]*p2p.Channel {
	channels := map[types.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size)
	}
	return channels
}

// RandomNode returns a random node.
// math/rand is acceptable here: this is test tooling, not security-sensitive.
func (n *Network) RandomNode() *Node {
	nodes := make([]*Node, 0, len(n.Nodes))
	for _, node := range n.Nodes {
		nodes = append(nodes, node)
	}
	return nodes[rand.Intn(len(nodes))] // nolint:gosec
}

// Peers returns a node's peers (i.e. everyone except itself).
func (n *Network) Peers(id types.NodeID) []*Node {
	peers := make([]*Node, 0, len(n.Nodes)-1)
	for _, peer := range n.Nodes {
		if peer.NodeID != id {
			peers = append(peers, peer)
		}
	}
	return peers
}

// Remove removes a node from the network, stopping it and waiting for all other
// nodes to pick up the disconnection.
func (n *Network) Remove(t *testing.T, id types.NodeID) {
	require.Contains(t, n.Nodes, id)
	node := n.Nodes[id]
	delete(n.Nodes, id)

	// Subscribe on every remaining node BEFORE shutting the target down, so
	// none of them can miss the PeerStatusDown update.
	subs := []*p2p.PeerUpdates{}
	for _, peer := range n.Nodes {
		sub := peer.PeerManager.Subscribe()
		defer sub.Close()
		subs = append(subs, sub)
	}

	// Tear down transport, router, then peer manager, in that order.
	require.NoError(t, node.Transport.Close())
	if node.Router.IsRunning() {
		require.NoError(t, node.Router.Stop())
	}
	node.PeerManager.Close()

	for _, sub := range subs {
		RequireUpdate(t, sub, p2p.PeerUpdate{
			NodeID: node.NodeID,
			Status: p2p.PeerStatusDown,
		})
	}
}

// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
	NodeID      types.NodeID
	NodeInfo    types.NodeInfo
	NodeAddress p2p.NodeAddress
	PrivKey     crypto.PrivKey
	Router      *p2p.Router
	PeerManager *p2p.PeerManager
	Transport   *p2p.MemoryTransport
}

// MakeNode creates a new Node configured for the network with a
// running peer manager, but does not add it to the existing
// network. Callers are responsible for updating peering relationships.
func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
	// This fork derives node identity from a GOST key pair rather than ed25519.
	privKey := gost512.GenPrivKey()
	nodeID := types.NodeIDFromPubKey(privKey.PubKey())
	nodeInfo := types.NodeInfo{
		NodeID:     nodeID,
		ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.
		Moniker:    string(nodeID),
	}

	transport := n.memoryNetwork.CreateTransport(nodeID)
	require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")

	// Short retry times keep reconnect-related tests fast.
	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
		MinRetryTime:    10 * time.Millisecond,
		MaxRetryTime:    100 * time.Millisecond,
		RetryTimeJitter: time.Millisecond,
		MaxPeers:        opts.MaxPeers,
		MaxConnected:    opts.MaxConnected,
	})
	require.NoError(t, err)

	router, err := p2p.NewRouter(
		n.logger,
		p2p.NopMetrics(),
		nodeInfo,
		privKey,
		peerManager,
		[]p2p.Transport{transport},
		// No-op DialSleep: don't throttle dialing in tests.
		p2p.RouterOptions{DialSleep: func(_ context.Context) {}},
	)
	require.NoError(t, err)
	require.NoError(t, router.Start())

	// Cleanup mirrors Remove's teardown order: router, peer manager, transport.
	t.Cleanup(func() {
		if router.IsRunning() {
			require.NoError(t, router.Stop())
		}
		peerManager.Close()
		require.NoError(t, transport.Close())
	})

	return &Node{
		NodeID:      nodeID,
		NodeInfo:    nodeInfo,
		NodeAddress: transport.Endpoints()[0].NodeAddress(nodeID),
		PrivKey:     privKey,
		Router:      router,
		PeerManager: peerManager,
		Transport:   transport,
	}
}

// MakeChannel opens a channel, with automatic error handling and cleanup. On
// test cleanup, it also checks that the channel is empty, to make sure
// all expected messages have been asserted.
func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor,
	messageType proto.Message, size int) *p2p.Channel {
	channel, err := n.Router.OpenChannel(chDesc, messageType, size)
	require.NoError(t, err)
	require.Contains(t, n.Router.NodeInfo().Channels, chDesc.ID)
	t.Cleanup(func() {
		RequireEmpty(t, channel)
		channel.Close()
	})
	return channel
}

// MakeChannelNoCleanup opens a channel, with automatic error handling. The
// caller must ensure proper cleanup of the channel.
298 func (n *Node) MakeChannelNoCleanup( 299 t *testing.T, 300 chDesc p2p.ChannelDescriptor, 301 messageType proto.Message, 302 size int, 303 ) *p2p.Channel { 304 305 channel, err := n.Router.OpenChannel(chDesc, messageType, size) 306 require.NoError(t, err) 307 return channel 308 } 309 310 // MakePeerUpdates opens a peer update subscription, with automatic cleanup. 311 // It checks that all updates have been consumed during cleanup. 312 func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates { 313 t.Helper() 314 sub := n.PeerManager.Subscribe() 315 t.Cleanup(func() { 316 t.Helper() 317 RequireNoUpdates(t, sub) 318 sub.Close() 319 }) 320 321 return sub 322 } 323 324 // MakePeerUpdatesNoRequireEmpty opens a peer update subscription, with automatic cleanup. 325 // It does *not* check that all updates have been consumed, but will 326 // close the update channel. 327 func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates { 328 sub := n.PeerManager.Subscribe() 329 t.Cleanup(func() { 330 sub.Close() 331 }) 332 333 return sub 334 } 335 336 func MakeChannelDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { 337 return p2p.ChannelDescriptor{ 338 ID: byte(chID), 339 Priority: 5, 340 SendQueueCapacity: 10, 341 RecvMessageCapacity: 10, 342 MaxSendBytes: 1000, 343 } 344 }