github.com/koko1123/flow-go-1@v0.29.6/network/p2p/scoring/app_score_test.go

package scoring_test

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	mocktestify "github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/module/id"
	"github.com/koko1123/flow-go-1/module/irrecoverable"
	"github.com/koko1123/flow-go-1/module/metrics"
	"github.com/koko1123/flow-go-1/module/mock"
	"github.com/koko1123/flow-go-1/network/channels"
	"github.com/koko1123/flow-go-1/network/internal/p2pfixtures"
	"github.com/koko1123/flow-go-1/network/p2p"
	"github.com/koko1123/flow-go-1/network/p2p/scoring"
	p2ptest "github.com/koko1123/flow-go-1/network/p2p/test"
	flowpubsub "github.com/koko1123/flow-go-1/network/validator/pubsub"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

// TestFullGossipSubConnectivity tests that when the entire network is run by honest nodes,
// pushing access nodes to the edges of the network (i.e., the access nodes are not in the mesh of any honest node)
// does not cause a network partition, i.e., all honest nodes can still communicate with each other through GossipSub.
func TestFullGossipSubConnectivity(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
	sporkId := unittest.IdentifierFixture()
	idProvider := mock.NewIdentityProvider(t)

	// two groups of non-access nodes and one group of access nodes.
	groupOneNodes, groupOneIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5,
		p2ptest.WithRole(flow.RoleConsensus),
		p2ptest.WithPeerScoringEnabled(idProvider))
	groupTwoNodes, groupTwoIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5,
		p2ptest.WithRole(flow.RoleCollection),
		p2ptest.WithPeerScoringEnabled(idProvider))
	accessNodeGroup, accessNodeIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5,
		p2ptest.WithRole(flow.RoleAccess),
		p2ptest.WithPeerScoringEnabled(idProvider))

	ids := append(append(groupOneIds, groupTwoIds...), accessNodeIds...)
	nodes := append(append(groupOneNodes, groupTwoNodes...), accessNodeGroup...)
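
	// wires the mocked identity provider to a fixed provider over all test identities;
	// the two function arguments to Return compute the (identity, ok) return values of
	// ByPeerID dynamically against that fixed set.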
	provider := id.NewFixedIdentityProvider(ids)
	idProvider.On("ByPeerID", mocktestify.Anything).Return(
		func(peerId peer.ID) *flow.Identity {
			identity, _ := provider.ByPeerID(peerId)
			return identity
		}, func(peerId peer.ID) bool {
			_, ok := provider.ByPeerID(peerId)
			return ok
		})
	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
	defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second)

	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
	slashingViolationsConsumer := unittest.NetworkSlashingViolationsConsumer(unittest.Logger(), metrics.NewNoopCollector())
	logger := unittest.Logger()

	// all nodes subscribe to the block topic (the common topic among all roles)
	// group one
	groupOneSubs := make([]p2p.Subscription, len(groupOneNodes))
	var err error
	for i, node := range groupOneNodes {
		groupOneSubs[i], err = node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.NetworkCodec(), slashingViolationsConsumer, unittest.AllowAllPeerFilter()))
		require.NoError(t, err)
	}
	// group two
	groupTwoSubs := make([]p2p.Subscription, len(groupTwoNodes))
	for i, node := range groupTwoNodes {
		groupTwoSubs[i], err = node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.NetworkCodec(), slashingViolationsConsumer, unittest.AllowAllPeerFilter()))
		require.NoError(t, err)
	}
	// access node group
	accessNodeSubs := make([]p2p.Subscription, len(accessNodeGroup))
	for i, node := range accessNodeGroup {
		accessNodeSubs[i], err = node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.NetworkCodec(), slashingViolationsConsumer, unittest.AllowAllPeerFilter()))
		require.NoError(t, err)
	}

	// creates a topology as follows:
	// groupOneNodes <--> accessNodeGroup <--> groupTwoNodes
	p2ptest.LetNodesDiscoverEachOther(t, ctx, append(groupOneNodes, accessNodeGroup...), append(groupOneIds, accessNodeIds...))
	p2ptest.LetNodesDiscoverEachOther(t, ctx, append(groupTwoNodes, accessNodeGroup...), append(groupTwoIds, accessNodeIds...))

	// checks that end-to-end message delivery works:
	// each node sends a distinct message to all and checks that all nodes receive it.
	for _, node := range nodes {
		proposalMsg := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channels.PushBlocks)
		require.NoError(t, node.Publish(ctx, blockTopic, proposalMsg))

		// checks that the message is received by all nodes.
		ctx5s, cancel5s := context.WithTimeout(ctx, 5*time.Second)
		p2pfixtures.SubsMustReceiveMessage(t, ctx5s, proposalMsg, groupOneSubs)
		p2pfixtures.SubsMustReceiveMessage(t, ctx5s, proposalMsg, accessNodeSubs)
		p2pfixtures.SubsMustReceiveMessage(t, ctx5s, proposalMsg, groupTwoSubs)

		cancel5s()
	}
}

// TestFullGossipSubConnectivityAmongHonestNodesWithMaliciousMajority is part two of testing pushing access nodes to the edges of the network.
// This test proves that when access nodes are PUSHED to the edge of the network, even a malicious majority of them cannot partition
// the network of honest nodes.
func TestFullGossipSubConnectivityAmongHonestNodesWithMaliciousMajority(t *testing.T) {
	// Note: if this test ever becomes flaky, it indicates a bug in our scoring system. Please escalate to the team instead of skipping it.
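	// The scenario is repeated several times to account for the randomness of GossipSub mesh construction;
	// with peer scoring enabled on the honest nodes, message delivery must succeed in every single run.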
	total := 10
	for i := 0; i < total; i++ {
		if !testGossipSubMessageDeliveryUnderNetworkPartition(t, true) {
			// even a single failure must not happen, as it would mean that a malicious majority can partition the network
			// despite our peer scoring parameters.
			require.Fail(t, "honest nodes could not exchange messages on GossipSub")
		}
	}
}

// TestNetworkPartitionWithNoHonestPeerScoringInFullTopology is part one of testing pushing access nodes to the edges of the network.
// This test proves that if access nodes are NOT pushed to the edge of the network, a malicious majority of them can
// partition the network by disconnecting honest nodes from each other, even when the network topology is a complete graph (i.e., full topology).
func TestNetworkPartitionWithNoHonestPeerScoringInFullTopology(t *testing.T) {
	unittest.SkipUnless(t, unittest.TEST_FLAKY, "to be fixed later")
	total := 100
	for i := 0; i < total; i++ {
		// false means no honest peer scoring.
		if !testGossipSubMessageDeliveryUnderNetworkPartition(t, false) {
			return // partition is successful
		}
	}
	require.Fail(t, "expected at least one network partition")
}

// testGossipSubMessageDeliveryUnderNetworkPartition tests whether two honest nodes can exchange messages on GossipSub
// when the network topology is a complete graph (i.e., full topology) and a malicious majority of access nodes is present.
// If honestPeerScoring is true, then peer scoring is enabled on the honest nodes.
// A true return value means that the two honest nodes can exchange messages.
// A false return value means that the two honest nodes cannot exchange messages within the given timeout.
func testGossipSubMessageDeliveryUnderNetworkPartition(t *testing.T, honestPeerScoring bool) bool {
	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
	sporkId := unittest.IdentifierFixture()

	idProvider := mock.NewIdentityProvider(t)
	// two (honest) consensus nodes
	opts := []p2ptest.NodeFixtureParameterOption{p2ptest.WithRole(flow.RoleConsensus)}
	if honestPeerScoring {
		opts = append(opts, p2ptest.WithPeerScoringEnabled(idProvider))
	}
	con1Node, con1Id := p2ptest.NodeFixture(t, sporkId, t.Name(), opts...)
	con2Node, con2Id := p2ptest.NodeFixture(t, sporkId, t.Name(), opts...)

	// create > 2 * 12 malicious access nodes;
	// 12 is the maximum size of the default GossipSub mesh.
	// We want to make sure that it is unlikely for the honest nodes to end up in the same mesh (hence messages from
	// one honest node to the other are routed through the malicious nodes).
	accessNodeGroup, accessNodeIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 30,
		p2ptest.WithRole(flow.RoleAccess),
		p2ptest.WithPeerScoringEnabled(idProvider),
		// overrides the default peer scoring parameters to mute GossipSub traffic from/to honest nodes.
		p2ptest.WithAppSpecificScore(maliciousAppSpecificScore(flow.IdentityList{&con1Id, &con2Id})))

	allNodes := append([]p2p.LibP2PNode{con1Node, con2Node}, accessNodeGroup...)
	allIds := append([]*flow.Identity{&con1Id, &con2Id}, accessNodeIds...)
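
	// wires the mocked identity provider to the fixed provider over all test identities.
	// Maybe() marks the expectation as optional: depending on mesh construction, not every
	// peer is guaranteed to trigger an identity lookup in every run.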
	provider := id.NewFixedIdentityProvider(allIds)
	idProvider.On("ByPeerID", mocktestify.Anything).Return(
		func(peerId peer.ID) *flow.Identity {
			identity, _ := provider.ByPeerID(peerId)
			return identity
		}, func(peerId peer.ID) bool {
			_, ok := provider.ByPeerID(peerId)
			return ok
		}).Maybe()

	p2ptest.StartNodes(t, signalerCtx, allNodes, 100*time.Millisecond)
	defer p2ptest.StopNodes(t, allNodes, cancel, 2*time.Second)

	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
	slashingViolationsConsumer := unittest.NetworkSlashingViolationsConsumer(unittest.Logger(), metrics.NewNoopCollector())
	logger := unittest.Logger()

	// all nodes subscribe to the block topic (the common topic among all roles)
	_, err := con1Node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.NetworkCodec(), slashingViolationsConsumer, unittest.AllowAllPeerFilter()))
	require.NoError(t, err)

	con2Sub, err := con2Node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.NetworkCodec(), slashingViolationsConsumer, unittest.AllowAllPeerFilter()))
	require.NoError(t, err)

	// access node group
	accessNodeSubs := make([]p2p.Subscription, len(accessNodeGroup))
	for i, node := range accessNodeGroup {
		sub, err := node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.NetworkCodec(), slashingViolationsConsumer, unittest.AllowAllPeerFilter()))
		require.NoError(t, err)
		accessNodeSubs[i] = sub
	}

	// let nodes reside on a full topology, hence no partition is caused by the topology itself.
	p2ptest.LetNodesDiscoverEachOther(t, ctx, allNodes, allIds)

	proposalMsg := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channels.PushBlocks)
	require.NoError(t, con1Node.Publish(ctx, blockTopic, proposalMsg))

	// we check whether the message is received by the other honest consensus node within a one-second window.
	// the one-second window is important because it triggers the heartbeat of con1Node to perform a lazy pull (iHave),
	// and con1Node may randomly choose con2Node as the peer to perform the lazy pull with.
	// However, under a network partition con2Node is not in the mesh of con1Node, and hence is deprived of the eager push from con1Node.
	//
	// If honest peer scoring is not enabled, then con1Node and con2Node are less likely to be in the same mesh, and hence the message is not delivered.
	// If honest peer scoring is enabled, then con1Node and con2Node are certainly in the same mesh, and hence the message is delivered.
	ctx1s, cancel1s := context.WithTimeout(ctx, 1*time.Second)
	defer cancel1s()
	return p2pfixtures.HasSubReceivedMessage(t, ctx1s, proposalMsg, con2Sub)
}

// maliciousAppSpecificScore returns a malicious app-specific score function that rewards the malicious nodes and
// punishes the honest nodes.
func maliciousAppSpecificScore(honestIds flow.IdentityList) func(peer.ID) float64 {
	honestIdProvider := id.NewFixedIdentityProvider(honestIds)
	return func(p peer.ID) float64 {
		_, isHonest := honestIdProvider.ByPeerID(p)
		if isHonest {
			return scoring.MaxAppSpecificPenalty
		}

		return scoring.MaxAppSpecificReward
	}
}
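
// For contrast with maliciousAppSpecificScore above, the sketch below shows what a benign
// app-specific score function could look like: it rewards peers that resolve to a known
// (staked) identity and penalizes unknown peers. This is a hypothetical illustration only,
// not the production default of the scoring package; the function name and scoring logic
// are assumptions made for this sketch.
func benignAppSpecificScoreSketch(knownIds flow.IdentityList) func(peer.ID) float64 {
	knownIdProvider := id.NewFixedIdentityProvider(knownIds)
	return func(p peer.ID) float64 {
		// peers resolving to a known identity receive the maximum reward ...
		if _, known := knownIdProvider.ByPeerID(p); known {
			return scoring.MaxAppSpecificReward
		}
		// ... while unknown peers receive the maximum penalty.
		return scoring.MaxAppSpecificPenalty
	}
}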