github.com/braveheart12/insolar-09-08-19@v0.8.7/network/servicenetwork/integr_test.go

// +build networktest

/*
 * The Clear BSD License
 *
 * Copyright (c) 2019 Insolar Technologies
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
 *
 *  Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
 *  Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
 *  Neither the name of Insolar Technologies nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
 *
 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package servicenetwork

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/insolar/insolar/consensus/phases"
	"github.com/insolar/insolar/core"
	"github.com/insolar/insolar/log"
	"github.com/insolar/insolar/network/nodenetwork"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

var (
	consensusMin    = 5 // minimum count of participants that can survive when one node leaves
	consensusMinMsg = fmt.Sprintf("skip test for bootstrap nodes < %d", consensusMin)
)

func (s *testSuite) TestNetworkConsensus3Times() {
	s.waitForConsensus(3)
}

func (s *testSuite) TestNodeConnect() {
	testNode := newNetworkNode()
	s.preInitNode(testNode)

	s.InitNode(testNode)
	s.StartNode(testNode)
	defer s.StopNode(testNode)

	s.waitForConsensus(1)

	activeNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))

	s.waitForConsensus(1)

	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))

	s.waitForConsensus(2)

	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()+1, len(activeNodes))
	activeNodes = testNode.serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()+1, len(activeNodes))
}

func (s *testSuite) TestNodeConnectInvalidVersion() {
	testNode := newNetworkNode()
	s.preInitNode(testNode)
	testNode.serviceNetwork.NodeKeeper.GetOrigin().(nodenetwork.MutableNode).SetVersion("ololo")
	s.InitNode(testNode)
	err := testNode.componentManager.Start(s.fixture().ctx)
	assert.Error(s.T(), err)
	log.Infof("Error: %s", err)
}

func (s *testSuite) TestTwoNodesConnect() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	testNode := newNetworkNode()
	testNode2 := newNetworkNode()

	s.preInitNode(testNode)
	s.preInitNode(testNode2)

	s.InitNode(testNode)
	s.InitNode(testNode2)

	wg := sync.WaitGroup{}
	wg.Add(2)

	go func(wg *sync.WaitGroup) {
		s.StartNode(testNode)
		wg.Done()
	}(&wg)

	go func(wg *sync.WaitGroup) {
		s.StartNode(testNode2)
		wg.Done()
	}(&wg)

	wg.Wait()

	defer func() {
		s.StopNode(testNode)
		s.StopNode(testNode2)
	}()

	s.waitForConsensus(1)

	activeNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))

	s.waitForConsensus(1)

	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))

	s.waitForConsensus(2)

	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()+2, len(activeNodes))
	activeNodes = testNode.serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()+2, len(activeNodes))
	activeNodes = testNode2.serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()+2, len(activeNodes))
}
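// Join timeline, as asserted by the connect tests above (inferred from the
// assertions themselves, not from the consensus spec): one consensus round
// after a node starts it is not yet visible, after the second round it shows
// up in the active list, and two more rounds later it appears in every
// node's working list.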
func (s *testSuite) TestNodeLeave() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	testNode := newNetworkNode()
	s.preInitNode(testNode)

	s.InitNode(testNode)
	s.StartNode(testNode)
	defer s.StopNode(testNode)

	s.waitForConsensus(2)

	activeNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))

	s.waitForConsensus(1)

	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()+1, len(activeNodes))

	testNode.serviceNetwork.Leave(context.Background(), 0)

	s.waitForConsensus(2)

	// one active node becomes "not working"
	workingNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(workingNodes))

	// but all nodes are still active
	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	s.Equal(s.getNodesCount()+1, len(activeNodes))
}

func (s *testSuite) TestNodeLeaveAtETA() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	testNode := newNetworkNode()
	s.preInitNode(testNode)

	s.InitNode(testNode)
	s.StartNode(testNode)
	defer s.StopNode(testNode)

	// wait for the node to be added to the active list
	s.waitForConsensus(2)
	activeNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	s.Equal(s.getNodesCount()+1, len(activeNodes))

	pulse, err := s.fixture().bootstrapNodes[0].serviceNetwork.PulseStorage.Current(s.fixture().ctx)
	s.NoError(err)

	// the next pulse will be the last one for this node
	testNode.serviceNetwork.Leave(s.fixture().ctx, pulse.NextPulseNumber)

	// the node is still active and working
	s.waitForConsensus(1)
	workingNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	s.Equal(s.getNodesCount()+1, len(activeNodes))
	s.Equal(s.getNodesCount()+1, len(workingNodes))

	// now the node leaves, but it is still in the active list
	s.waitForConsensus(1)
	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	workingNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(workingNodes))
	s.Equal(s.getNodesCount()+1, len(activeNodes))
}
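// Leave semantics, as exercised by the two tests above: Leave(ctx, 0)
// requests an immediate departure, while Leave(ctx, eta) with a nonzero
// pulse number schedules the departure for that pulse. Either way the node
// drops out of the working list first and stays in the active list for at
// least one more consensus round. (Inferred from these tests; the
// authoritative behavior lives in the ServiceNetwork implementation.)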
func (s *testSuite) TestNodeComeAfterAnotherNodeSendLeaveETA() {
	s.T().Skip("fix testcase in TESTNET 2.0")
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	leavingNode := newNetworkNode()
	s.preInitNode(leavingNode)

	s.InitNode(leavingNode)
	s.StartNode(leavingNode)
	defer s.StopNode(leavingNode)

	// wait for the node to be added to the active list
	s.waitForConsensus(2)
	activeNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	s.Equal(s.getNodesCount()+1, len(activeNodes))

	pulse, err := s.fixture().bootstrapNodes[0].serviceNetwork.PulseStorage.Current(s.fixture().ctx)
	s.NoError(err)

	// leaving in 3 pulses
	pulseDelta := pulse.NextPulseNumber - pulse.PulseNumber
	leavingNode.serviceNetwork.Leave(s.fixture().ctx, pulse.PulseNumber+3*pulseDelta)

	// wait for leavingNode to be marked as leaving
	s.waitForConsensus(1)

	newNode := newNetworkNode()
	s.preInitNode(newNode)

	s.InitNode(newNode)
	s.StartNode(newNode)
	defer s.StopNode(newNode)

	// wait for newNode to be added to the active list; this is the last pulse for leavingNode
	s.waitForConsensus(2)

	// newNode has no working nodes yet
	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	workingNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	newNodeWorkingNodes := newNode.serviceNetwork.NodeKeeper.GetWorkingNodes()

	s.Equal(s.getNodesCount()+2, len(activeNodes))
	s.Equal(s.getNodesCount()+1, len(workingNodes))
	s.Equal(0, len(newNodeWorkingNodes))

	// newNode should have the same working node list as the other nodes,
	// but it doesn't because it missed the leaving claim
	s.waitForConsensus(1)
	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	workingNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	newNodeWorkingNodes = newNode.serviceNetwork.NodeKeeper.GetWorkingNodes()

	s.Equal(s.getNodesCount()+2, len(activeNodes))
	s.Equal(s.getNodesCount()+1, len(workingNodes))
	// TODO: fix this testcase
	s.Equal(s.getNodesCount()+1, len(newNodeWorkingNodes))

	// leavingNode leaves; newNode is still ok
	s.waitForConsensus(1)
	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetActiveNodes()
	workingNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	newNodeWorkingNodes = newNode.serviceNetwork.NodeKeeper.GetWorkingNodes()

	s.Equal(s.getNodesCount()+1, len(activeNodes))
	s.Equal(s.getNodesCount()+1, len(workingNodes))
	s.Equal(s.getNodesCount()+1, len(newNodeWorkingNodes))
}

func TestServiceNetworkOneBootstrap(t *testing.T) {
	s := NewTestSuite(1, 0)
	suite.Run(t, s)
}

func TestServiceNetworkManyBootstraps(t *testing.T) {
	s := NewTestSuite(15, 0)
	suite.Run(t, s)
}

func TestServiceNetworkManyNodes(t *testing.T) {
	t.Skip("tmp 123")

	s := NewTestSuite(5, 10)
	suite.Run(t, s)
}

// Full timeout test
type FullTimeoutPhaseManager struct{}

func (ftpm *FullTimeoutPhaseManager) OnPulse(ctx context.Context, pulse *core.Pulse, pulseStartTime time.Time) error {
	return nil
}

func (s *testSuite) TestFullTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	// TODO: make this set operation thread-safe somehow (race detector does not like this code)
	wrapper := s.fixture().bootstrapNodes[1].serviceNetwork.PhaseManager.(*phaseManagerWrapper)
	wrapper.original = &FullTimeoutPhaseManager{}
	s.fixture().bootstrapNodes[1].serviceNetwork.PhaseManager = wrapper

	s.waitForConsensus(2)

	activeNodes := s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()-1, len(activeNodes))
}

// Partial timeout
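// Each TestPartialXxxPhaseTimeOut below follows the same pattern:
// setCommunicatorMock (defined at the end of this file) wraps the phase
// communicators of a subset of nodes so that they ignore bootstrap node 0
// during the given consensus phase(s). Judging by the assertions, the
// "positive" options (a small timed-out fraction) keep every node in the
// working list, while only the "negative" first-phase timeout costs the
// network one working node.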
func (s *testSuite) TestPartialPositive1PhaseTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	setCommunicatorMock(s.fixture().bootstrapNodes, PartialPositive1Phase)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}

func (s *testSuite) TestPartialPositive2PhaseTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	setCommunicatorMock(s.fixture().bootstrapNodes, PartialPositive2Phase)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}

func (s *testSuite) TestPartialNegative1PhaseTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	setCommunicatorMock(s.fixture().bootstrapNodes, PartialNegative1Phase)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()-1, len(activeNodes))
}

func (s *testSuite) TestPartialNegative2PhaseTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	setCommunicatorMock(s.fixture().bootstrapNodes, PartialNegative2Phase)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}

func (s *testSuite) TestPartialNegative3PhaseTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	setCommunicatorMock(s.fixture().bootstrapNodes, PartialNegative3Phase)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}

func (s *testSuite) TestPartialPositive3PhaseTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	setCommunicatorMock(s.fixture().bootstrapNodes, PartialPositive3Phase)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}

func (s *testSuite) TestPartialNegative23PhaseTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	setCommunicatorMock(s.fixture().bootstrapNodes, PartialNegative23Phase)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}

func (s *testSuite) TestPartialPositive23PhaseTimeOut() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	setCommunicatorMock(s.fixture().bootstrapNodes, PartialPositive23Phase)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}
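// Discovery-node failure tests. waitForConsensusExcept presumably runs the
// consensus wait on every node except the one with the given id; it is used
// below because bootstrap node 0 is stopped (or restarting) and cannot take
// part in consensus.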
func (s *testSuite) TestDiscoveryDown() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	s.fixture().bootstrapNodes[0].serviceNetwork.Stop(context.Background())
	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()-1, len(activeNodes))
}

func (s *testSuite) TestDiscoveryRestart() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	s.waitForConsensus(2)

	log.Info("Discovery node stopping...")
	err := s.fixture().bootstrapNodes[0].serviceNetwork.Stop(context.Background())
	s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.(*nodeKeeperWrapper).Wipe(true)
	log.Info("Discovery node stopped")
	require.NoError(s.T(), err)

	s.waitForConsensusExcept(2, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount()-1, len(activeNodes))

	log.Info("Discovery node starting...")
	err = s.fixture().bootstrapNodes[0].serviceNetwork.Start(context.Background())
	log.Info("Discovery node started")
	require.NoError(s.T(), err)

	s.waitForConsensusExcept(3, s.fixture().bootstrapNodes[0].id)
	activeNodes = s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}

func (s *testSuite) TestDiscoveryRestartNoWait() {
	if len(s.fixture().bootstrapNodes) < consensusMin {
		s.T().Skip(consensusMinMsg)
	}

	s.waitForConsensus(2)

	log.Info("Discovery node stopping...")
	err := s.fixture().bootstrapNodes[0].serviceNetwork.Stop(context.Background())
	s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.(*nodeKeeperWrapper).Wipe(true)
	log.Info("Discovery node stopped")
	require.NoError(s.T(), err)

	go func() {
		log.Info("Discovery node starting...")
		// use a local err to avoid racing on the variable in the outer scope
		err := s.fixture().bootstrapNodes[0].serviceNetwork.Start(context.Background())
		log.Info("Discovery node started")
		require.NoError(s.T(), err)
	}()

	s.waitForConsensusExcept(4, s.fixture().bootstrapNodes[0].id)
	activeNodes := s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetActiveNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
	s.waitForConsensusExcept(1, s.fixture().bootstrapNodes[0].id)
	activeNodes = s.fixture().bootstrapNodes[0].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
	activeNodes = s.fixture().bootstrapNodes[1].serviceNetwork.NodeKeeper.GetWorkingNodes()
	s.Equal(s.getNodesCount(), len(activeNodes))
}

func setCommunicatorMock(nodes []*networkNode, opt CommunicatorTestOpt) {
	ref := nodes[0].id
	timedOutNodesCount := 0
	switch opt {
	case PartialNegative1Phase, PartialNegative2Phase, PartialNegative3Phase, PartialNegative23Phase:
		timedOutNodesCount = int(float64(len(nodes)) * 0.6)
	case PartialPositive1Phase, PartialPositive2Phase, PartialPositive3Phase, PartialPositive23Phase:
		timedOutNodesCount = int(float64(len(nodes)) * 0.2)
	}
	// TODO: make these set operations thread-safe somehow (race detector does not like this code)
	for i := 1; i <= timedOutNodesCount; i++ {
		comm := nodes[i].serviceNetwork.PhaseManager.(*phaseManagerWrapper).original.(*phases.Phases).FirstPhase.(*phases.FirstPhaseImpl).Communicator
		wrapper := &CommunicatorMock{communicator: comm, ignoreFrom: ref, testOpt: opt}
		phasemanager := nodes[i].serviceNetwork.PhaseManager.(*phaseManagerWrapper).original.(*phases.Phases)
		phasemanager.FirstPhase.(*phases.FirstPhaseImpl).Communicator = wrapper
		phasemanager.SecondPhase.(*phases.SecondPhaseImpl).Communicator = wrapper
		phasemanager.ThirdPhase.(*phases.ThirdPhaseImpl).Communicator = wrapper
	}
}
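// For reference (simple arithmetic, not an extra assertion): with the 15
// bootstrap nodes started by TestServiceNetworkManyBootstraps,
// setCommunicatorMock wraps int(15*0.6) = 9 nodes for the PartialNegative*
// options and int(15*0.2) = 3 nodes for the PartialPositive* options. The
// loop starts at i = 1, so nodes[0] is never wrapped: it is the node the
// others ignore (ignoreFrom = nodes[0].id).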