github.com/asynkron/protoactor-go@v0.0.0-20240308120642-ef91a6abee75/cluster/cluster_test_tool/pubsub_test.go

package cluster_test_tool

//TODO: fix this
//
//import (
//	"context"
//	"strconv"
//	"sync"
//	"sync/atomic"
//	"testing"
//	"time"
//
//	"github.com/asynkron/protoactor-go/actor"
//	"github.com/asynkron/protoactor-go/cluster"
//	"github.com/stretchr/testify/suite"
//)
//
//type PubSubTestSuite struct {
//	suite.Suite
//	fixture *PubSubClusterFixture
//}
//
//func (suite *PubSubTestSuite) SetupTest() {
//	suite.fixture = NewPubSubClusterFixture(suite.T(), 2, false)
//	suite.fixture.Initialize()
//}
//
//func (suite *PubSubTestSuite) TearDownTest() {
//	suite.fixture.ShutDown()
//}
//
//func (suite *PubSubTestSuite) TestCanDeliverSingleMessages() {
//	subscriberIds := suite.fixture.SubscriberIds("single-test", 20)
//	const topic = "single-test-topic"
//	const numMessages = 100
//
//	suite.fixture.SubscribeAllTo(topic, subscriberIds)
//
//	for i := 0; i < numMessages; i++ {
//		data, err := suite.fixture.PublishData(topic, i)
//		suite.Assert().NoError(err, "message "+strconv.Itoa(i)+" should not have an error")
//		suite.Assert().NotNil(data, "response "+strconv.Itoa(i)+" should not be nil")
//	}
//
//	suite.fixture.VerifyAllSubscribersGotAllTheData(subscriberIds, numMessages)
//}
//
//func (suite *PubSubTestSuite) TestCanDeliverMessageBatches() {
//	subscriberIds := suite.fixture.SubscriberIds("batch-test", 20)
//	const topic = "batch-test-topic"
//	const numMessages = 100
//
//	suite.fixture.SubscribeAllTo(topic, subscriberIds)
//
//	for i := 0; i < numMessages/10; i++ {
//		data := intRange(i*10, 10)
//		batch, err := suite.fixture.PublishDataBatch(topic, data)
//		suite.Assert().NoError(err, "message "+strconv.Itoa(i)+" should not have an error")
//		suite.Assert().NotNil(batch, "response "+strconv.Itoa(i)+" should not be nil")
//	}
//	suite.fixture.VerifyAllSubscribersGotAllTheData(subscriberIds, numMessages)
//}
//
//func (suite *PubSubTestSuite) TestUnsubscribedActorDoesNotReceiveMessages() {
//	const sub1 = "unsubscribe-test-1"
//	const sub2 = "unsubscribe-test-2"
//	const topic = "unsubscribe-test"
//
//	suite.fixture.SubscribeTo(topic, sub1, PubSubSubscriberKind)
//	suite.fixture.SubscribeTo(topic, sub2, PubSubSubscriberKind)
//
//	suite.fixture.UnSubscribeTo(topic, sub2, PubSubSubscriberKind)
//
//	_, err := suite.fixture.PublishData(topic, 1)
//	suite.Assert().NoError(err, "PublishData should not have an error")
//
//	time.Sleep(time.Second * 1) // allow time to confirm the message is not delivered to the unsubscribed subscriber
//	WaitUntil(suite.T(), func() bool {
//		suite.fixture.DeliveriesLock.RLock()
//		defer suite.fixture.DeliveriesLock.RUnlock()
//		return len(suite.fixture.Deliveries) == 1
//	}, "only one delivery should happen because the other actor is unsubscribed", DefaultWaitTimeout)
//
//	suite.fixture.DeliveriesLock.RLock()
//	defer suite.fixture.DeliveriesLock.RUnlock()
//	suite.Assert().Len(suite.fixture.Deliveries, 1, "only one delivery should happen because the other actor is unsubscribed")
//	suite.Assert().Equal(sub1, suite.fixture.Deliveries[0].Identity, "the other actor should be unsubscribed")
//}
//
//func (suite *PubSubTestSuite) TestCanSubscribeWithPid() {
//	const topic = "pid-subscribe"
//
//	var deliveredMessage *DataPublished
//
//	props := actor.PropsFromFunc(func(context actor.Context) {
//		switch msg := context.Message().(type) {
//		case *DataPublished:
//			deliveredMessage = msg
//		}
//	})
//	member := suite.fixture.GetMembers()[0]
//	pid := member.ActorSystem.Root.Spawn(props)
//	_, err := member.SubscribeByPid(topic, pid)
//	suite.Assert().NoError(err, "SubscribeByPid should not have an error")
//
//	_, err = suite.fixture.PublishData(topic, 1)
//	suite.Assert().NoError(err, "PublishData should not have an error")
//
//	WaitUntil(suite.T(), func() bool {
//		return deliveredMessage != nil
//	}, "message should be delivered", DefaultWaitTimeout)
//	suite.Assert().EqualValues(1, deliveredMessage.Data)
//}
//
//func (suite *PubSubTestSuite) TestCanUnsubscribeWithPid() {
//	const topic = "pid-unsubscribe"
//
//	var deliveryCount int32 = 0
//
//	props := actor.PropsFromFunc(func(context actor.Context) {
//		switch context.Message().(type) {
//		case *DataPublished:
//			atomic.AddInt32(&deliveryCount, 1)
//		}
//	})
//	member := suite.fixture.GetMembers()[0]
//	pid := member.ActorSystem.Root.Spawn(props)
//	_, err := member.SubscribeByPid(topic, pid)
//	suite.Assert().NoError(err, "SubscribeByPid should not have an error")
//
//	_, err = member.UnsubscribeByPid(topic, pid)
//	suite.Assert().NoError(err, "UnsubscribeByPid should not have an error")
//
//	_, err = suite.fixture.PublishData(topic, 1)
//	suite.Assert().NoError(err, "PublishData should not have an error")
//
//	time.Sleep(time.Second * 1) // allow time to confirm the message is not delivered to the unsubscribed actor
//	suite.Assert().EqualValues(0, deliveryCount, "message should not be delivered")
//}
//
//func (suite *PubSubTestSuite) TestStoppedActorThatDidNotUnsubscribeDoesNotBlockPublishingToTopic() {
//	const topic = "missing-unsubscribe"
//	var deliveryCount int32 = 0
//
//	// this scenario is only relevant for regular actors;
//	// virtual actors always exist, so the messages should never be dead-lettered
//	props := actor.PropsFromFunc(func(context actor.Context) {
//		switch context.Message().(type) {
//		case *DataPublished:
//			atomic.AddInt32(&deliveryCount, 1)
//		}
//	})
//	member := suite.fixture.GetMembers()[0]
//	pid1 := member.ActorSystem.Root.Spawn(props)
//	pid2 := member.ActorSystem.Root.Spawn(props)
//
//	// spawn two actors and subscribe them to the topic
//	_, err := member.SubscribeByPid(topic, pid1)
//	suite.Assert().NoError(err, "SubscribeByPid1 should not have an error")
//	_, err = member.SubscribeByPid(topic, pid2)
//	suite.Assert().NoError(err, "SubscribeByPid2 should not have an error")
//
//	// publish one message
//	_, err = suite.fixture.PublishData(topic, 1)
//	suite.Assert().NoError(err, "PublishData should not have an error")
//
//	WaitUntil(suite.T(), func() bool {
//		return atomic.LoadInt32(&deliveryCount) == 2
//	}, "the message should be delivered to both subscribers", DefaultWaitTimeout)
//
//	// kill one of the actors
//	member.ActorSystem.Root.Stop(pid2)
//
//	// publish again
//	_, err = suite.fixture.PublishData(topic, 2)
//	suite.Assert().NoError(err, "PublishData should not have an error")
//
//	WaitUntil(suite.T(), func() bool {
//		return atomic.LoadInt32(&deliveryCount) == 3
//	}, "second publish should be delivered only to one of the actors", DefaultWaitTimeout)
//
//	WaitUntil(suite.T(), func() bool {
//		subscribers, err := suite.fixture.GetSubscribersForTopic(topic)
//		suite.Assert().NoError(err, "GetSubscribersForTopic should not have an error")
//
//		hasPid2 := false
//		for _, subscriber := range subscribers.Subscribers {
//			if subscriber.GetPid() != nil &&
//				subscriber.GetPid().Id == pid2.Id &&
//				subscriber.GetPid().Address == pid2.Address {
//				hasPid2 = true
//				break
//			}
//		}
//		return !hasPid2
//	}, "pid2 should be removed from subscriber store", DefaultWaitTimeout*1000)
//}
//
//func (suite *PubSubTestSuite) TestSlowPidSubscriberThatTimesOutDoesNotPreventSubsequentPublishes() {
//	const topic = "slow-pid-subscriber"
//	var deliveryCount int32 = 0
//
//	// a slow subscriber that will time out
//	props := actor.PropsFromFunc(func(context actor.Context) {
//		time.Sleep(time.Second * 4)
//		atomic.AddInt32(&deliveryCount, 1)
//	})
//
//	member := suite.fixture.RandomMember()
//	pid := member.ActorSystem.Root.Spawn(props)
//	_, err := member.SubscribeByPid(topic, pid)
//	suite.Assert().NoError(err, "SubscribeByPid should not have an error")
//
//	// publish one message
//	_, err = suite.fixture.PublishData(topic, 1)
//	suite.Assert().NoError(err, "PublishData should not have an error")
//
//	// next published message should also be delivered
//	_, err = suite.fixture.PublishData(topic, 1)
//	suite.Assert().NoError(err, "PublishData should not have an error")
//
//	WaitUntil(suite.T(), func() bool {
//		return atomic.LoadInt32(&deliveryCount) == 2
//	}, "A timing out subscriber should not prevent subsequent publishes", time.Second*10)
//}
//
//func (suite *PubSubTestSuite) TestSlowClusterIdentitySubscriberThatTimesOutDoesNotPreventSubsequentPublishes() {
//	const topic = "slow-ci-subscriber"
//	suite.fixture.SubscribeTo(topic, "slow-ci-1", PubSubTimeoutSubscriberKind)
//
//	// publish one message
//	_, err := suite.fixture.PublishData(topic, 1)
//	suite.Assert().NoError(err, "PublishData1 should not have an error")
//
//	// next published message should also be delivered
//	_, err = suite.fixture.PublishData(topic, 1)
//	suite.Assert().NoError(err, "PublishData2 should not have an error")
//
//	WaitUntil(suite.T(), func() bool {
//		suite.fixture.DeliveriesLock.RLock()
//		defer suite.fixture.DeliveriesLock.RUnlock()
//
//		return len(suite.fixture.Deliveries) == 2
//	}, "A timing out subscriber should not prevent subsequent publishes", time.Second*10)
//}
//
//func (suite *PubSubTestSuite) TestCanPublishMessagesViaBatchingProducer() {
//	subscriberIds := suite.fixture.SubscriberIds("batching-producer-test", 20)
//	const topic = "batching-producer"
//	const numMessages = 100
//
//	suite.fixture.SubscribeAllTo(topic, subscriberIds)
//
//	producer := suite.fixture.GetMembers()[0].BatchingProducer(topic, cluster.WithBatchingProducerBatchSize(10))
//	defer producer.Dispose()
//
//	wg := sync.WaitGroup{}
//	for i := 0; i < numMessages; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			_, err := producer.Produce(context.Background(), &DataPublished{Data: int32(i)})
//			suite.Assert().NoError(err, "Produce should not have an error")
//		}(i)
//	}
//	wg.Wait()
//
//	suite.fixture.VerifyAllSubscribersGotAllTheData(subscriberIds, numMessages)
//}
//
//func (suite *PubSubTestSuite) TestCanPublishMessagesViaBatchingProducerWithCustomQueue() {
//	subscriberIds := suite.fixture.SubscriberIds("batching-producer-test-with-chan", 20)
//	const topic = "batching-producer-with-chan"
//	const numMessages = 100
//
//	suite.fixture.SubscribeAllTo(topic, subscriberIds)
//
//	producer := suite.fixture.GetMembers()[0].BatchingProducer(topic, cluster.WithBatchingProducerBatchSize(10), cluster.WithBatchingProducerMaxQueueSize(2000))
//	defer producer.Dispose()
//
//	wg := sync.WaitGroup{}
//	for i := 0; i < numMessages; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			_, err := producer.Produce(context.Background(), &DataPublished{Data: int32(i)})
//			suite.Assert().NoError(err, "Produce should not have an error")
//		}(i)
//	}
//	wg.Wait()
//
//	suite.fixture.VerifyAllSubscribersGotAllTheData(subscriberIds, numMessages)
//}
//
//func (suite *PubSubTestSuite) TestWillExpireTopicActorAfterIdle() {
//	subscriberIds := suite.fixture.SubscriberIds("batching-producer-idl-test", 20)
//	const topic = "batching-producer"
//	const numMessages = 100
//
//	suite.fixture.SubscribeAllTo(topic, subscriberIds)
//
//	firstCluster := suite.fixture.GetMembers()[0]
//
//	producer := firstCluster.BatchingProducer(topic, cluster.WithBatchingProducerPublisherIdleTimeout(time.Second*2))
//	defer producer.Dispose()
//
//	wg := sync.WaitGroup{}
//	for i := 0; i < numMessages; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			_, err := producer.Produce(context.Background(), &DataPublished{Data: int32(i)})
//			suite.Assert().NoError(err, "Produce should not have an error")
//		}(i)
//	}
//	wg.Wait()
//
//	pid := firstCluster.Get(topic, cluster.TopicActorKind)
//	suite.Assert().NotNil(pid, "Topic actor should not be nil")
//
//	time.Sleep(time.Second * 5)
//
//	newPid := firstCluster.Get(topic, cluster.TopicActorKind)
//	suite.Assert().NotEqual(pid.String(), newPid.String(), "Topic actor should be recreated")
//}
//
//// In order for 'go test' to run this suite, we need to create
//// a normal test function and pass our suite to suite.Run
//func TestPubSubTestSuite(t *testing.T) {
//	suite.Run(t, new(PubSubTestSuite))
//}
//
//func intRange(start int, count int) []int {
//	res := make([]int, count)
//	for i := 0; i < count; i++ {
//		res[i] = start + i
//	}
//	return res
//}