// Copyright 2021 - 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package hakeeper

import (
	"bytes"
	"sort"
	"testing"

	sm "github.com/lni/dragonboat/v4/statemachine"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	pb "github.com/matrixorigin/matrixone/pkg/pb/logservice"
	"github.com/matrixorigin/matrixone/pkg/pb/metadata"
)

// TestAssignID verifies that assignID hands out IDs starting from 1 and
// advances the persisted NextID counter as it does so.
func TestAssignID(t *testing.T) {
	tsm := NewStateMachine(0, 1).(*stateMachine)
	assert.Equal(t, uint64(0), tsm.state.NextID)
	assert.Equal(t, uint64(1), tsm.assignID())
	assert.Equal(t, uint64(1), tsm.state.NextID)
}

// TestHAKeeperStateMachineCanBeCreated checks that NewStateMachine succeeds
// for shard 0 and records the replica ID, and that creating a state machine
// for any other shard ID panics (asserted via the deferred recover).
func TestHAKeeperStateMachineCanBeCreated(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Fatalf("failed to panic")
		}
	}()
	tsm := NewStateMachine(0, 1).(*stateMachine)
	assert.Equal(t, uint64(1), tsm.replicaID)
	// shard ID 1 is not the HAKeeper shard; this call is expected to panic.
	NewStateMachine(1, 1)
}

// TestHAKeeperStateMachineSnapshot verifies that SaveSnapshot/RecoverFromSnapshot
// round-trips NextID and LogShards, while the per-instance replicaID is not
// part of the snapshot.
func TestHAKeeperStateMachineSnapshot(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	tsm2 := NewStateMachine(0, 2).(*stateMachine)
	tsm1.state.NextID = 12345
	tsm1.state.LogShards["test1"] = 23456
	tsm1.state.LogShards["test2"] = 34567

	buf := bytes.NewBuffer(nil)
	assert.Nil(t, tsm1.SaveSnapshot(buf, nil, nil))
	assert.Nil(t, tsm2.RecoverFromSnapshot(buf, nil, nil))
	assert.Equal(t, tsm1.state.NextID, tsm2.state.NextID)
	assert.Equal(t, tsm1.state.LogShards, tsm2.state.LogShards)
	// replicaID is local to each instance and must survive snapshot recovery.
	assert.True(t, tsm1.replicaID != tsm2.replicaID)
}

// TestHAKeeperCanBeClosed checks that Close returns no error.
func TestHAKeeperCanBeClosed(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	assert.Nil(t, tsm1.Close())
}

// TestHAKeeperTick verifies that each applied tick command increments the
// logical clock stored in state.Tick.
func TestHAKeeperTick(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	assert.Equal(t, uint64(0), tsm1.state.Tick)
	cmd := GetTickCmd()
	_, err := tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	assert.Equal(t, uint64(2), tsm1.state.Tick)
}

// TestHandleLogHeartbeat applies a log store heartbeat after three ticks and
// verifies the store's info (addresses, replicas, and the tick at which the
// heartbeat was applied) is recorded in LogState.
func TestHandleLogHeartbeat(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	cmd := GetTickCmd()
	_, err := tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)

	hb := pb.LogStoreHeartbeat{
		UUID:           "uuid1",
		RaftAddress:    "localhost:9090",
		ServiceAddress: "localhost:9091",
		GossipAddress:  "localhost:9092",
		Replicas: []pb.LogReplicaInfo{
			{
				LogShardInfo: pb.LogShardInfo{
					ShardID: 100,
					Replicas: map[uint64]string{
						200: "localhost:8000",
						300: "localhost:9000",
					},
					Epoch:    200,
					LeaderID: 200,
					Term:     10,
				},
			},
			{
				LogShardInfo: pb.LogShardInfo{
					ShardID: 101,
					Replicas: map[uint64]string{
						201: "localhost:8000",
						301: "localhost:9000",
					},
					Epoch:    202,
					LeaderID: 201,
					Term:     30,
				},
			},
		},
	}
	data, err := hb.Marshal()
	require.NoError(t, err)
	cmd = GetLogStoreHeartbeatCmd(data)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	s := tsm1.state.LogState
	assert.Equal(t, 1, len(s.Stores))
	lsinfo, ok := s.Stores[hb.UUID]
	require.True(t, ok)
	// three ticks were applied before the heartbeat.
	assert.Equal(t, uint64(3), lsinfo.Tick)
	assert.Equal(t, hb.RaftAddress, lsinfo.RaftAddress)
	assert.Equal(t, hb.ServiceAddress, lsinfo.ServiceAddress)
	assert.Equal(t, hb.GossipAddress, lsinfo.GossipAddress)
	assert.Equal(t, 2, len(lsinfo.Replicas))
	assert.Equal(t, hb.Replicas, lsinfo.Replicas)
}

// TestHandleDNHeartbeat applies a DN store heartbeat after three ticks and
// verifies the store's shards and heartbeat tick are recorded in DNState.
func TestHandleDNHeartbeat(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	cmd := GetTickCmd()
	_, err := tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)

	hb := pb.DNStoreHeartbeat{
		UUID: "uuid1",
		Shards: []pb.DNShardInfo{
			{ShardID: 1, ReplicaID: 1},
			{ShardID: 2, ReplicaID: 1},
			{ShardID: 3, ReplicaID: 1},
		},
	}
	data, err := hb.Marshal()
	require.NoError(t, err)
	cmd = GetDNStoreHeartbeatCmd(data)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	s := tsm1.state.DNState
	assert.Equal(t, 1, len(s.Stores))
	dninfo, ok := s.Stores[hb.UUID]
	assert.True(t, ok)
	assert.Equal(t, uint64(3), dninfo.Tick)
	require.Equal(t, 3, len(dninfo.Shards))
	assert.Equal(t, hb.Shards, dninfo.Shards)
}

// TestHandleCNHeartbeat applies a CN store heartbeat after three ticks and
// verifies the store appears in CNState with the correct heartbeat tick.
func TestHandleCNHeartbeat(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	cmd := GetTickCmd()
	_, err := tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)

	hb := pb.CNStoreHeartbeat{
		UUID: "uuid1",
	}
	data, err := hb.Marshal()
	require.NoError(t, err)
	cmd = GetCNStoreHeartbeatCmd(data)
	_, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	s := tsm1.state.CNState
	assert.Equal(t, 1, len(s.Stores))
	cninfo, ok := s.Stores[hb.UUID]
	assert.True(t, ok)
	assert.Equal(t, uint64(3), cninfo.Tick)
}

// TestGetIDCmd verifies ID-range allocation while the HAKeeper is running:
// each GetID command of size 100 returns the first ID of a fresh range of
// 100, and assignID keeps advancing from wherever the last range ended.
func TestGetIDCmd(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	tsm1.state.State = pb.HAKeeperRunning
	cmd := GetGetIDCmd(100)
	result, err := tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	assert.Equal(t, sm.Result{Value: 1}, result)
	result, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	assert.Equal(t, sm.Result{Value: 101}, result)
	// a single-ID allocation right after the [101,200] range.
	assert.Equal(t, uint64(201), tsm1.assignID())

	result, err = tsm1.Update(sm.Entry{Cmd: cmd})
	assert.NoError(t, err)
	assert.Equal(t, sm.Result{Value: 202}, result)
}

// TestUpdateScheduleCommandsCmd verifies that an update-commands command
// groups schedule commands by store UUID, and that a subsequent update
// carrying a stale (smaller) term is ignored entirely.
func TestUpdateScheduleCommandsCmd(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	sc1 := pb.ScheduleCommand{
		UUID: "uuid1",
		ConfigChange: &pb.ConfigChange{
			Replica: pb.Replica{
				ShardID: 1,
			},
		},
	}
	sc2 := pb.ScheduleCommand{
		UUID: "uuid2",
		ConfigChange: &pb.ConfigChange{
			Replica: pb.Replica{
				ShardID: 2,
			},
		},
	}
	sc3 := pb.ScheduleCommand{
		UUID: "uuid1",
		ConfigChange: &pb.ConfigChange{
			Replica: pb.Replica{
				ShardID: 3,
			},
		},
	}
	sc4 := pb.ScheduleCommand{
		UUID: "uuid3",
		ConfigChange: &pb.ConfigChange{
			Replica: pb.Replica{
				ShardID: 4,
			},
		},
	}

	b := pb.CommandBatch{
		Term:     101,
		Commands: []pb.ScheduleCommand{sc1, sc2, sc3},
	}
	cmd := GetUpdateCommandsCmd(b.Term, b.Commands)
	result, err := tsm1.Update(sm.Entry{Cmd: cmd})
	require.NoError(t, err)
	assert.Equal(t, sm.Result{}, result)
	assert.Equal(t, b.Term, tsm1.state.Term)
	require.Equal(t, 2, len(tsm1.state.ScheduleCommands))
	l1, ok := tsm1.state.ScheduleCommands["uuid1"]
	assert.True(t, ok)
	assert.Equal(t, pb.CommandBatch{Commands: []pb.ScheduleCommand{sc1, sc3}}, l1)
	l2, ok := tsm1.state.ScheduleCommands["uuid2"]
	assert.True(t, ok)
	assert.Equal(t, pb.CommandBatch{Commands: []pb.ScheduleCommand{sc2}}, l2)

	// stale term (b.Term-1): the whole batch must be rejected, leaving the
	// previously stored commands untouched.
	cmd2 := GetUpdateCommandsCmd(b.Term-1,
		[]pb.ScheduleCommand{sc1, sc2, sc3, sc4})
	result, err = tsm1.Update(sm.Entry{Cmd: cmd2})
	require.NoError(t, err)
	assert.Equal(t, sm.Result{}, result)
	assert.Equal(t, b.Term, tsm1.state.Term)
	require.Equal(t, 2, len(tsm1.state.ScheduleCommands))
	l1, ok = tsm1.state.ScheduleCommands["uuid1"]
	assert.True(t, ok)
	assert.Equal(t, pb.CommandBatch{Commands: []pb.ScheduleCommand{sc1, sc3}}, l1)
	l2, ok = tsm1.state.ScheduleCommands["uuid2"]
	assert.True(t, ok)
	assert.Equal(t, pb.CommandBatch{Commands: []pb.ScheduleCommand{sc2}}, l2)
}

// TestScheduleCommandQuery verifies that a ScheduleCommandQuery lookup for a
// store UUID returns the command batch accumulated for that store.
func TestScheduleCommandQuery(t *testing.T) {
	tsm1 := NewStateMachine(0, 1).(*stateMachine)
	sc1 := pb.ScheduleCommand{
		UUID: "uuid1",
		ConfigChange: &pb.ConfigChange{
			Replica: pb.Replica{
				ShardID: 1,
			},
		},
	}
	sc2 := pb.ScheduleCommand{
		UUID: "uuid2",
		ConfigChange: &pb.ConfigChange{
			Replica: pb.Replica{
				ShardID: 2,
			},
		},
	}
	sc3 := pb.ScheduleCommand{
		UUID: "uuid1",
		ConfigChange: &pb.ConfigChange{
			Replica: pb.Replica{
				ShardID: 3,
			},
		},
	}
	b := pb.CommandBatch{
		Term:     101,
		Commands: []pb.ScheduleCommand{sc1, sc2, sc3},
	}
	cmd := GetUpdateCommandsCmd(b.Term, b.Commands)
	_, err := tsm1.Update(sm.Entry{Cmd: cmd})
	require.NoError(t, err)
	r, err := tsm1.Lookup(&ScheduleCommandQuery{UUID: "uuid1"})
	require.NoError(t, err)
	cb, ok := r.(*pb.CommandBatch)
	require.True(t, ok)
	assert.Equal(t, 2, len(cb.Commands))
	b = pb.CommandBatch{
		Commands: []pb.ScheduleCommand{sc1, sc3},
	}
	assert.Equal(t, b, *cb)
}

// TestClusterDetailsQuery populates CN, DN, and log store state directly and
// verifies that a ClusterDetailsQuery lookup returns a matching
// pb.ClusterDetails. CN and log stores are sorted by UUID before comparison
// because map iteration order is not deterministic.
func TestClusterDetailsQuery(t *testing.T) {
	tsm := NewStateMachine(0, 1).(*stateMachine)
	tsm.state.CNState = pb.CNState{
		Stores: make(map[string]pb.CNStoreInfo),
	}
	tsm.state.CNState.Stores["uuid1"] = pb.CNStoreInfo{
		Tick:           1,
		ServiceAddress: "addr1",
	}
	tsm.state.CNState.Stores["uuid2"] = pb.CNStoreInfo{
		Tick:           2,
		ServiceAddress: "addr2",
	}
	tsm.state.DNState = pb.DNState{
		Stores: make(map[string]pb.DNStoreInfo),
	}
	tsm.state.DNState.Stores["uuid3"] = pb.DNStoreInfo{
		Tick:           3,
		ServiceAddress: "addr3",
		Shards: []pb.DNShardInfo{
			{
				ShardID:   2,
				ReplicaID: 1,
			},
		},
		LogtailServerAddress: "addr4",
	}
	tsm.state.LogState.Shards[1] = pb.LogShardInfo{
		ShardID:  1,
		Replicas: map[uint64]string{1: "store-1", 2: "store-2", 3: "store-3"},
		Epoch:    1, LeaderID: 1, Term: 1,
	}

	tsm.state.LogState.Stores["store-1"] = pb.LogStoreInfo{
		Tick:           100,
		ServiceAddress: "addr-log-1",
		Replicas: []pb.LogReplicaInfo{{
			LogShardInfo: pb.LogShardInfo{
				ShardID:  1,
				Replicas: map[uint64]string{1: "store-1", 2: "store-2", 3: "store-3"},
				Epoch:    1, LeaderID: 1, Term: 1,
			}, ReplicaID: 1,
		}},
	}

	tsm.state.LogState.Stores["store-2"] = pb.LogStoreInfo{
		Tick:           100,
		ServiceAddress: "addr-log-2",
		Replicas: []pb.LogReplicaInfo{{
			LogShardInfo: pb.LogShardInfo{
				ShardID:  1,
				Replicas: map[uint64]string{1: "store-1", 2: "store-2", 3: "store-3"},
				Epoch:    1, LeaderID: 1, Term: 1,
			}, ReplicaID: 2,
		}},
	}

	tsm.state.LogState.Stores["store-3"] = pb.LogStoreInfo{
		Tick:           100,
		ServiceAddress: "addr-log-3",
		Replicas: []pb.LogReplicaInfo{{
			LogShardInfo: pb.LogShardInfo{
				ShardID:  1,
				Replicas: map[uint64]string{1: "store-1", 2: "store-2", 3: "store-3"},
				Epoch:    1, LeaderID: 1, Term: 1,
			}, ReplicaID: 3,
		}},
	}

	v, err := tsm.Lookup(&ClusterDetailsQuery{})
	require.NoError(t, err)
	expected := &pb.ClusterDetails{
		DNStores: []pb.DNStore{
			{
				UUID:           "uuid3",
				Tick:           3,
				ServiceAddress: "addr3",
				Shards: []pb.DNShardInfo{
					{
						ShardID:   2,
						ReplicaID: 1,
					},
				},
				LogtailServerAddress: "addr4",
			},
		},
		CNStores: []pb.CNStore{
			{
				UUID:           "uuid1",
				Tick:           1,
				ServiceAddress: "addr1",
			},
			{
				UUID:           "uuid2",
				Tick:           2,
				ServiceAddress: "addr2",
			},
		},
		LogStores: []pb.LogStore{
			{
				UUID:           "store-1",
				ServiceAddress: "addr-log-1",
				Tick:           100,
				State:          0,
				Replicas: []pb.LogReplicaInfo{{
					LogShardInfo: pb.LogShardInfo{
						ShardID:  1,
						Replicas: map[uint64]string{1: "store-1", 2: "store-2", 3: "store-3"},
						Epoch:    1, LeaderID: 1, Term: 1,
					}, ReplicaID: 1,
				}},
			},
			{
				UUID:           "store-2",
				ServiceAddress: "addr-log-2",
				Tick:           100,
				State:          0,
				Replicas: []pb.LogReplicaInfo{{
					LogShardInfo: pb.LogShardInfo{
						ShardID:  1,
						Replicas: map[uint64]string{1: "store-1", 2: "store-2", 3: "store-3"},
						Epoch:    1, LeaderID: 1, Term: 1,
					}, ReplicaID: 2,
				}},
			},
			{
				UUID:           "store-3",
				ServiceAddress: "addr-log-3",
				Tick:           100,
				State:          0,
				Replicas: []pb.LogReplicaInfo{{
					LogShardInfo: pb.LogShardInfo{
						ShardID:  1,
						Replicas: map[uint64]string{1: "store-1", 2: "store-2", 3: "store-3"},
						Epoch:    1, LeaderID: 1, Term: 1,
					}, ReplicaID: 3,
				}},
			},
		},
	}
	result := v.(*pb.ClusterDetails)
	sort.Slice(result.CNStores, func(i, j int) bool {
		return result.CNStores[i].UUID < result.CNStores[j].UUID
	})
	sort.Slice(result.LogStores, func(i, j int) bool {
		return result.LogStores[i].UUID < result.LogStores[j].UUID
	})
	assert.Equal(t, expected, result)
}

// TestInitialState checks that a fresh HAKeeper starts in the Created state.
func TestInitialState(t *testing.T) {
	rsm := NewStateMachine(0, 1).(*stateMachine)
	assert.Equal(t, pb.HAKeeperCreated, rsm.state.State)
}

// TestSetState exhaustively exercises the HAKeeper state transition table:
// for each (initialState, requested newState) pair, `result` is the state the
// machine must be in afterwards — i.e. equal to newState only for allowed
// transitions, otherwise unchanged.
func TestSetState(t *testing.T) {
	tests := []struct {
		initialState pb.HAKeeperState
		newState     pb.HAKeeperState
		result       pb.HAKeeperState
	}{
		{pb.HAKeeperCreated, pb.HAKeeperBootstrapping, pb.HAKeeperCreated},
		{pb.HAKeeperCreated, pb.HAKeeperBootstrapFailed, pb.HAKeeperCreated},
		{pb.HAKeeperCreated, pb.HAKeeperRunning, pb.HAKeeperCreated},
		{pb.HAKeeperCreated, pb.HAKeeperCreated, pb.HAKeeperCreated},
		{pb.HAKeeperCreated, pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperCreated},

		{pb.HAKeeperBootstrapping, pb.HAKeeperCreated, pb.HAKeeperBootstrapping},
		{pb.HAKeeperBootstrapping, pb.HAKeeperBootstrapFailed, pb.HAKeeperBootstrapping},
		{pb.HAKeeperBootstrapping, pb.HAKeeperRunning, pb.HAKeeperBootstrapping},
		{pb.HAKeeperBootstrapping, pb.HAKeeperBootstrapping, pb.HAKeeperBootstrapping},
		{pb.HAKeeperBootstrapping, pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperBootstrapCommandsReceived},

		{pb.HAKeeperBootstrapFailed, pb.HAKeeperBootstrapFailed, pb.HAKeeperBootstrapFailed},
		{pb.HAKeeperBootstrapFailed, pb.HAKeeperCreated, pb.HAKeeperBootstrapFailed},
		{pb.HAKeeperBootstrapFailed, pb.HAKeeperBootstrapping, pb.HAKeeperBootstrapFailed},
		{pb.HAKeeperBootstrapFailed, pb.HAKeeperRunning, pb.HAKeeperBootstrapFailed},
		{pb.HAKeeperBootstrapFailed, pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperBootstrapFailed},

		{pb.HAKeeperRunning, pb.HAKeeperRunning, pb.HAKeeperRunning},
		{pb.HAKeeperRunning, pb.HAKeeperCreated, pb.HAKeeperRunning},
		{pb.HAKeeperRunning, pb.HAKeeperBootstrapping, pb.HAKeeperRunning},
		{pb.HAKeeperRunning, pb.HAKeeperBootstrapFailed, pb.HAKeeperRunning},
		{pb.HAKeeperRunning, pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperRunning},

		{pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperCreated, pb.HAKeeperBootstrapCommandsReceived},
		{pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperBootstrapping, pb.HAKeeperBootstrapCommandsReceived},
		{pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperBootstrapCommandsReceived},
		{pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperBootstrapFailed, pb.HAKeeperBootstrapFailed},
		{pb.HAKeeperBootstrapCommandsReceived, pb.HAKeeperRunning, pb.HAKeeperRunning},
	}

	for _, tt := range tests {
		rsm := stateMachine{
			state: pb.HAKeeperRSMState{
				State: tt.initialState,
			},
		}
		cmd := GetSetStateCmd(tt.newState)
		_, err := rsm.Update(sm.Entry{Cmd: cmd})
		require.NoError(t, err)
		assert.Equal(t, tt.result, rsm.state.State)
	}
}

// TestSetTaskSchedulerState exercises the task scheduler transition table in
// the same style as TestSetState: `result` is the expected state after the
// set-state command is applied (with the HAKeeper itself in Running state).
func TestSetTaskSchedulerState(t *testing.T) {
	tests := []struct {
		initialState pb.TaskSchedulerState
		newState     pb.TaskSchedulerState
		result       pb.TaskSchedulerState
	}{
		{pb.TaskSchedulerCreated, pb.TaskSchedulerCreated, pb.TaskSchedulerCreated},
		{pb.TaskSchedulerCreated, pb.TaskSchedulerRunning, pb.TaskSchedulerCreated},
		{pb.TaskSchedulerCreated, pb.TaskSchedulerStopped, pb.TaskSchedulerCreated},

		{pb.TaskSchedulerRunning, pb.TaskSchedulerCreated, pb.TaskSchedulerRunning},
		{pb.TaskSchedulerRunning, pb.TaskSchedulerRunning, pb.TaskSchedulerRunning},
		{pb.TaskSchedulerRunning, pb.TaskSchedulerStopped, pb.TaskSchedulerStopped},

		{pb.TaskSchedulerStopped, pb.TaskSchedulerCreated, pb.TaskSchedulerStopped},
		{pb.TaskSchedulerStopped, pb.TaskSchedulerRunning, pb.TaskSchedulerRunning},
		{pb.TaskSchedulerStopped, pb.TaskSchedulerStopped, pb.TaskSchedulerStopped},
	}

	for _, tt := range tests {
		rsm := stateMachine{
			state: pb.HAKeeperRSMState{
				State:              pb.HAKeeperRunning,
				TaskSchedulerState: tt.initialState,
			},
		}
		cmd := GetSetTaskSchedulerStateCmd(tt.newState)
		_, err := rsm.Update(sm.Entry{Cmd: cmd})
		require.NoError(t, err)
		assert.Equal(t, tt.result, rsm.state.TaskSchedulerState)
	}
}

// TestInitialClusterRequestCmd verifies that an initial-cluster request
// command round-trips its three size parameters through encode/parse.
func TestInitialClusterRequestCmd(t *testing.T) {
	cmd := GetInitialClusterRequestCmd(2, 2, 3)
	req := parseInitialClusterRequestCmd(cmd)
	assert.Equal(t, uint64(2), req.NumOfLogShards)
	assert.Equal(t, uint64(2), req.NumOfDNShards)
	assert.Equal(t, uint64(3), req.NumOfLogReplicas)
}

// TestHandleInitialClusterRequestCmd applies an initial-cluster request
// (2 log shards, 2 DN shards, 3 replicas each) and verifies the generated
// ClusterInfo layout, the transition to Bootstrapping, and that NextID is
// advanced past the reserved K8S ID range.
func TestHandleInitialClusterRequestCmd(t *testing.T) {
	cmd := GetInitialClusterRequestCmd(2, 2, 3)
	rsm := NewStateMachine(0, 1).(*stateMachine)
	result, err := rsm.Update(sm.Entry{Cmd: cmd})
	require.NoError(t, err)
	assert.Equal(t, sm.Result{Value: 0}, result)

	expected := pb.ClusterInfo{
		LogShards: []metadata.LogShardRecord{
			{
				ShardID:          0,
				NumberOfReplicas: 3,
			},
			{
				ShardID:          1,
				NumberOfReplicas: 3,
			},
			{
				ShardID:          3,
				NumberOfReplicas: 3,
			},
		},
		DNShards: []metadata.DNShardRecord{
			{
				ShardID:    2,
				LogShardID: 1,
			},
			{
				ShardID:    4,
				LogShardID: 3,
			},
		},
	}

	assert.Equal(t, expected, rsm.state.ClusterInfo)
	assert.Equal(t, pb.HAKeeperBootstrapping, rsm.state.State)
	assert.Equal(t, K8SIDRangeEnd, rsm.state.NextID)
}

// TestGetCommandBatch verifies that getCommandBatch returns the marshaled
// batch stored for a UUID and removes it from ScheduleCommands (commands are
// consumed on read).
func TestGetCommandBatch(t *testing.T) {
	rsm := NewStateMachine(0, 1).(*stateMachine)
	cb := pb.CommandBatch{
		Term: 12345,
	}
	rsm.state.ScheduleCommands["uuid1"] = cb
	result := rsm.getCommandBatch("uuid1")
	var ncb pb.CommandBatch
	require.NoError(t, ncb.Unmarshal(result.Data))
	assert.Equal(t, cb, ncb)
	_, ok := rsm.state.ScheduleCommands["uuid1"]
	assert.False(t, ok)
}