github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/cdc/scheduler/internal/v3/coordinator_bench_test.go

// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package v3

import (
	"context"
	"fmt"
	"math"
	"testing"

	"github.com/pingcap/log"
	"github.com/pingcap/tiflow/cdc/model"
	"github.com/pingcap/tiflow/cdc/processor/tablepb"
	"github.com/pingcap/tiflow/cdc/scheduler/internal/v3/member"
	"github.com/pingcap/tiflow/cdc/scheduler/internal/v3/replication"
	"github.com/pingcap/tiflow/cdc/scheduler/internal/v3/transport"
	"github.com/pingcap/tiflow/cdc/scheduler/schedulepb"
	"github.com/pingcap/tiflow/pkg/config"
	"go.uber.org/zap/zapcore"
)

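// benchmarkCoordinator drives coordinator.poll through sub-benchmarks whose
// table count doubles from 1 up to 2^17. The factory callback builds a fresh
// coordinator, table list, and capture set for each size. A typical invocation
// from the repository root would be, for example:
//
//	go test -run='^$' -bench=BenchmarkCoordinator -benchmem ./cdc/scheduler/internal/v3/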
func benchmarkCoordinator(
	b *testing.B,
	factory func(total int) (
		name string,
		coord *coordinator,
		currentTables []model.TableID,
		captures map[model.CaptureID]*model.CaptureInfo,
	),
) {
	log.SetLevel(zapcore.DPanicLevel)
	ctx := context.Background()
	size := 131072 // 2^17
	for total := 1; total <= size; total *= 2 {
		name, coord, currentTables, captures := factory(total)
		b.ResetTimer()
		b.Run(name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				coord.poll(ctx, 0, currentTables, captures, schedulepb.NewBarrierWithMinTs(0))
			}
		})
		b.StopTimer()
	}
}

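// BenchmarkCoordinatorInit measures poll against a freshly constructed
// coordinator: the capture manager is not yet initialized and no table has
// been scheduled. Heartbeats are disabled (HeartbeatTick is math.MaxInt) so
// they stay out of the measurement.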
func BenchmarkCoordinatorInit(b *testing.B) {
	benchmarkCoordinator(b, func(total int) (
		name string,
		coord *coordinator,
		currentTables []model.TableID,
		captures map[model.CaptureID]*model.CaptureInfo,
	) {
		const captureCount = 8
		captures = map[model.CaptureID]*model.CaptureInfo{}
		for i := 0; i < captureCount; i++ {
			captures[fmt.Sprint(i)] = &model.CaptureInfo{}
		}
		currentTables = make([]model.TableID, 0, total)
		for i := 0; i < total; i++ {
			currentTables = append(currentTables, int64(10000+i))
		}
		// Disable heartbeat.
		cfg := config.NewDefaultSchedulerConfig()
		cfg.HeartbeatTick = math.MaxInt
		coord = &coordinator{
			trans:        transport.NewMockTrans(),
			replicationM: replication.NewReplicationManager(10, model.ChangeFeedID{}),
			captureM: member.NewCaptureManager(
				"", model.ChangeFeedID{}, schedulepb.OwnerRevision{}, cfg),
		}
		name = fmt.Sprintf("InitTable %d", total)
		return name, coord, currentTables, captures
	})
}

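// BenchmarkCoordinatorHeartbeat measures poll with HeartbeatTick set to 1, so
// a heartbeat is sent to every capture on each tick. The capture manager is
// pre-initialized and all captures are marked as initialized.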
func BenchmarkCoordinatorHeartbeat(b *testing.B) {
	benchmarkCoordinator(b, func(total int) (
		name string,
		coord *coordinator,
		currentTables []model.TableID,
		captures map[model.CaptureID]*model.CaptureInfo,
	) {
		const captureCount = 8
		captures = map[model.CaptureID]*model.CaptureInfo{}
		// Always heartbeat.
		cfg := config.NewDefaultSchedulerConfig()
		cfg.HeartbeatTick = 1
		captureM := member.NewCaptureManager(
			"", model.ChangeFeedID{}, schedulepb.OwnerRevision{}, cfg)
		captureM.SetInitializedForTests(true)
		for i := 0; i < captureCount; i++ {
			captures[fmt.Sprint(i)] = &model.CaptureInfo{}
			captureM.Captures[fmt.Sprint(i)] = &member.CaptureStatus{
				State: member.CaptureStateInitialized,
			}
		}
		currentTables = make([]model.TableID, 0, total)
		for i := 0; i < total; i++ {
			currentTables = append(currentTables, int64(10000+i))
		}
		coord = &coordinator{
			trans:        transport.NewMockTrans(),
			replicationM: replication.NewReplicationManager(10, model.ChangeFeedID{}),
			captureM:     captureM,
		}
		name = fmt.Sprintf("Heartbeat %d", total)
		return name, coord, currentTables, captures
	})
}

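// BenchmarkCoordinatorHeartbeatResponse measures poll while the mock transport
// supplies a heartbeat response from every capture reporting all of its tables
// as replicating. Outgoing heartbeats are disabled so the benchmark focuses on
// processing the responses.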
func BenchmarkCoordinatorHeartbeatResponse(b *testing.B) {
	benchmarkCoordinator(b, func(total int) (
		name string,
		coord *coordinator,
		currentTables []model.TableID,
		captures map[model.CaptureID]*model.CaptureInfo,
	) {
		const captureCount = 8
		captures = map[model.CaptureID]*model.CaptureInfo{}
		// Disable heartbeat.
		cfg := config.NewDefaultSchedulerConfig()
		cfg.HeartbeatTick = math.MaxInt
		captureM := member.NewCaptureManager(
			"", model.ChangeFeedID{}, schedulepb.OwnerRevision{}, cfg)
		captureM.SetInitializedForTests(true)
		for i := 0; i < captureCount; i++ {
			captures[fmt.Sprint(i)] = &model.CaptureInfo{}
			captureM.Captures[fmt.Sprint(i)] = &member.CaptureStatus{
				State: member.CaptureStateInitialized,
			}
		}
		replicationM := replication.NewReplicationManager(10, model.ChangeFeedID{})
		currentTables = make([]model.TableID, 0, total)
		heartbeatResp := make(map[model.CaptureID]*schedulepb.Message)
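		// Distribute the tables round-robin across the captures: register each
		// table as replicating in the replication manager and record it in that
		// capture's heartbeat response.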
		for i := 0; i < total; i++ {
			tableID := int64(10000 + i)
			currentTables = append(currentTables, tableID)
			captureID := fmt.Sprint(i % captureCount)
			span := tablepb.Span{TableID: tableID}
			rep, err := replication.NewReplicationSet(
				span, 0, map[string]*tablepb.TableStatus{
					captureID: {
						Span:  tablepb.Span{TableID: tableID},
						State: tablepb.TableStateReplicating,
					},
				}, model.ChangeFeedID{})
			if err != nil {
				b.Fatal(err)
			}
			replicationM.SetReplicationSetForTests(rep)
			_, ok := heartbeatResp[captureID]
			if !ok {
				heartbeatResp[captureID] = &schedulepb.Message{
					Header:            &schedulepb.Message_Header{},
					From:              captureID,
					MsgType:           schedulepb.MsgHeartbeatResponse,
					HeartbeatResponse: &schedulepb.HeartbeatResponse{},
				}
			}
			heartbeatResp[captureID].HeartbeatResponse.Tables = append(
				heartbeatResp[captureID].HeartbeatResponse.Tables,
				tablepb.TableStatus{
					Span:  tablepb.Span{TableID: tableID},
					State: tablepb.TableStateReplicating,
				})
		}
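		// Flatten the per-capture responses into the mock transport's receive
		// buffer; KeepRecvBuffer leaves the buffer in place so every poll in the
		// benchmark loop receives the same set of responses.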
		recvMsgs := make([]*schedulepb.Message, 0, len(heartbeatResp))
		for _, resp := range heartbeatResp {
			recvMsgs = append(recvMsgs, resp)
		}
		trans := &transport.MockTrans{
			RecvBuffer:     recvMsgs,
			KeepRecvBuffer: true,
		}
		coord = &coordinator{
			trans:        trans,
			replicationM: replicationM,
			captureM:     captureM,
		}
		name = fmt.Sprintf("HeartbeatResponse %d", total)
		return name, coord, currentTables, captures
	})
}