github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/cdc/api/v2/model.go

// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package v2

import (
    "encoding/json"
    "time"

    "github.com/pingcap/errors"
    "github.com/pingcap/tiflow/cdc/model"
    bf "github.com/pingcap/tiflow/pkg/binlog-filter"
    "github.com/pingcap/tiflow/pkg/config"
    cerror "github.com/pingcap/tiflow/pkg/errors"
    "github.com/pingcap/tiflow/pkg/integrity"
    "github.com/pingcap/tiflow/pkg/security"
    "github.com/pingcap/tiflow/pkg/util"
)

// EmptyResponse returns an empty {} to the http client
type EmptyResponse struct{}

// LogLevelReq is the log level request
type LogLevelReq struct {
    Level string `json:"log_level"`
}

// ListResponse is the response for all List APIs
type ListResponse[T any] struct {
    Total int `json:"total"`
    Items []T `json:"items"`
}

// Tso contains the timestamp obtained from PD
type Tso struct {
    Timestamp int64 `json:"timestamp"`
    LogicTime int64 `json:"logic_time"`
}

// Tables contains IneligibleTables and EligibleTables
type Tables struct {
    IneligibleTables []TableName `json:"ineligible_tables,omitempty"`
    EligibleTables []TableName `json:"eligible_tables,omitempty"`
}

// TableName contains table information
type TableName struct {
    Schema string `json:"database_name"`
    Table string `json:"table_name"`
    TableID int64 `json:"table_id"`
    IsPartition bool `json:"is_partition"`
}
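
// Illustrative sketch, not part of the upstream file: assuming the standard
// TiDB/PD TSO layout (a millisecond physical timestamp shifted left by 18
// logical bits), a Tso response can be folded into a single composite
// timestamp and converted to wall-clock time.
func exampleComposeTso(t Tso) (uint64, time.Time) { // hypothetical helper
    composite := uint64(t.Timestamp)<<18 | uint64(t.LogicTime)
    return composite, time.UnixMilli(t.Timestamp)
}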

// VerifyTableConfig is used to verify tables.
// Only used by Open API v2.
type VerifyTableConfig struct {
    PDConfig
    ReplicaConfig *ReplicaConfig `json:"replica_config"`
    StartTs uint64 `json:"start_ts"`
    SinkURI string `json:"sink_uri"`
}

func getDefaultVerifyTableConfig() *VerifyTableConfig {
    return &VerifyTableConfig{
        ReplicaConfig: GetDefaultReplicaConfig(),
    }
}

// ResumeChangefeedConfig is used by the resume changefeed API
type ResumeChangefeedConfig struct {
    PDConfig
    OverwriteCheckpointTs uint64 `json:"overwrite_checkpoint_ts"`
}

// PDConfig is a configuration used to connect to pd
type PDConfig struct {
    PDAddrs []string `json:"pd_addrs,omitempty"`
    CAPath string `json:"ca_path"`
    CertPath string `json:"cert_path"`
    KeyPath string `json:"key_path"`
    CertAllowedCN []string `json:"cert_allowed_cn,omitempty"`
}

// ChangefeedCommonInfo holds some common usage information of a changefeed
type ChangefeedCommonInfo struct {
    UpstreamID uint64 `json:"upstream_id"`
    Namespace string `json:"namespace"`
    ID string `json:"id"`
    FeedState model.FeedState `json:"state"`
    CheckpointTSO uint64 `json:"checkpoint_tso"`
    CheckpointTime model.JSONTime `json:"checkpoint_time"`
    RunningError *model.RunningError `json:"error"`
}

// SyncedStatusConfig represents the synced check interval config for a changefeed
type SyncedStatusConfig struct {
    // The minimum interval between the latest synced ts and now required to reach synced state
    SyncedCheckInterval int64 `json:"synced_check_interval"`
    // The maximum interval between latest checkpoint ts and now or
    // between latest sink's checkpoint ts and puller's checkpoint ts required to reach synced state
    CheckpointInterval int64 `json:"checkpoint_interval"`
}

// MarshalJSON marshals changefeed common info to json.
// We report an uninitialized feed state as normal and a pending state as
// warning, to hide the detail of those internal states from the user.
func (c ChangefeedCommonInfo) MarshalJSON() ([]byte, error) {
    // alias the original type to prevent recursive call of MarshalJSON
    type Alias ChangefeedCommonInfo

    if c.FeedState == model.StateUnInitialized {
        c.FeedState = model.StateNormal
    }
    if c.FeedState == model.StatePending {
        c.FeedState = model.StateWarning
    }

    return json.Marshal(struct {
        Alias
    }{
        Alias: Alias(c),
    })
}
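
// Illustrative sketch, not part of the upstream file: when serialized, an
// uninitialized changefeed reports itself as model.StateNormal and a pending
// one as model.StateWarning, per the MarshalJSON override above.
func exampleMaskedState() (string, error) { // hypothetical helper
    info := ChangefeedCommonInfo{ID: "test-cf", FeedState: model.StateUnInitialized}
    data, err := json.Marshal(info)
    // data now reports the normal state instead of the internal one.
    return string(data), err
}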

// ChangefeedConfig is used by the create changefeed API
type ChangefeedConfig struct {
    Namespace string `json:"namespace"`
    ID string `json:"changefeed_id"`
    StartTs uint64 `json:"start_ts"`
    TargetTs uint64 `json:"target_ts"`
    SinkURI string `json:"sink_uri"`
    ReplicaConfig *ReplicaConfig `json:"replica_config"`
    PDConfig
}

// ProcessorCommonInfo holds the common info of a processor
type ProcessorCommonInfo struct {
    Namespace string `json:"namespace"`
    ChangeFeedID string `json:"changefeed_id"`
    CaptureID string `json:"capture_id"`
}

// JSONDuration is used to wrap a duration into json format
type JSONDuration struct {
    duration time.Duration
}

// MarshalJSON marshals the duration to a number of nanoseconds
func (d JSONDuration) MarshalJSON() ([]byte, error) {
    return json.Marshal(d.duration.Nanoseconds())
}

// UnmarshalJSON unmarshals a json value into the wrapped duration
func (d *JSONDuration) UnmarshalJSON(b []byte) error {
    var v interface{}
    if err := json.Unmarshal(b, &v); err != nil {
        return err
    }
    switch value := v.(type) {
    case float64:
        d.duration = time.Duration(value)
        return nil
    case string:
        var err error
        d.duration, err = time.ParseDuration(value)
        if err != nil {
            return err
        }
        return nil
    default:
        return errors.New("invalid duration")
    }
}
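
// Illustrative sketch, not part of the upstream file: JSONDuration emits an
// integer number of nanoseconds and accepts either a number (nanoseconds) or
// a Go duration string such as "1m30s".
func exampleJSONDuration() ([]byte, error) { // hypothetical helper
    var d JSONDuration
    if err := json.Unmarshal([]byte(`"1m30s"`), &d); err != nil {
        return nil, err
    }
    return json.Marshal(d) // yields 90000000000 (nanoseconds)
}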

// ReplicaConfig is a duplicate of config.ReplicaConfig
type ReplicaConfig struct {
    MemoryQuota uint64 `json:"memory_quota"`
    CaseSensitive bool `json:"case_sensitive"`
    ForceReplicate bool `json:"force_replicate"`
    IgnoreIneligibleTable bool `json:"ignore_ineligible_table"`
    CheckGCSafePoint bool `json:"check_gc_safe_point"`
    EnableSyncPoint *bool `json:"enable_sync_point,omitempty"`
    EnableTableMonitor *bool `json:"enable_table_monitor,omitempty"`
    BDRMode *bool `json:"bdr_mode,omitempty"`

    SyncPointInterval *JSONDuration `json:"sync_point_interval,omitempty" swaggertype:"string"`
    SyncPointRetention *JSONDuration `json:"sync_point_retention,omitempty" swaggertype:"string"`

    Filter *FilterConfig `json:"filter"`
    Mounter *MounterConfig `json:"mounter"`
    Sink *SinkConfig `json:"sink"`
    Consistent *ConsistentConfig `json:"consistent,omitempty"`
    Scheduler *ChangefeedSchedulerConfig `json:"scheduler"`
    Integrity *IntegrityConfig `json:"integrity"`
    ChangefeedErrorStuckDuration *JSONDuration `json:"changefeed_error_stuck_duration,omitempty"`
    SyncedStatus *SyncedStatusConfig `json:"synced_status,omitempty"`

    // Deprecated: we don't use this field since v8.0.0.
    SQLMode string `json:"sql_mode,omitempty"`
}

// ToInternalReplicaConfig converts *v2.ReplicaConfig into *config.ReplicaConfig
func (c *ReplicaConfig) ToInternalReplicaConfig() *config.ReplicaConfig {
    return c.toInternalReplicaConfigWithOriginConfig(config.GetDefaultReplicaConfig())
}

// toInternalReplicaConfigWithOriginConfig converts *v2.ReplicaConfig into
// *config.ReplicaConfig on top of the given origin config.
func (c *ReplicaConfig) toInternalReplicaConfigWithOriginConfig(
    res *config.ReplicaConfig,
) *config.ReplicaConfig {
    res.MemoryQuota = c.MemoryQuota
    res.CaseSensitive = c.CaseSensitive
    res.ForceReplicate = c.ForceReplicate
    res.CheckGCSafePoint = c.CheckGCSafePoint
    res.EnableSyncPoint = c.EnableSyncPoint
    res.EnableTableMonitor = c.EnableTableMonitor
    res.IgnoreIneligibleTable = c.IgnoreIneligibleTable
    if c.SyncPointInterval != nil {
        res.SyncPointInterval = &c.SyncPointInterval.duration
    }
    if c.SyncPointRetention != nil {
        res.SyncPointRetention = &c.SyncPointRetention.duration
    }
    res.BDRMode = c.BDRMode

    if c.Filter != nil {
        var efs []*config.EventFilterRule
        if len(c.Filter.EventFilters) != 0 {
            efs = make([]*config.EventFilterRule, len(c.Filter.EventFilters))
            for i, ef := range c.Filter.EventFilters {
                efs[i] = ef.ToInternalEventFilterRule()
            }
        }
        res.Filter = &config.FilterConfig{
            Rules: c.Filter.Rules,
            IgnoreTxnStartTs: c.Filter.IgnoreTxnStartTs,
            EventFilters: efs,
        }
    }
    if c.Consistent != nil {
        res.Consistent = &config.ConsistentConfig{
            Level: c.Consistent.Level,
            MaxLogSize: c.Consistent.MaxLogSize,
            FlushIntervalInMs: c.Consistent.FlushIntervalInMs,
            MetaFlushIntervalInMs: c.Consistent.MetaFlushIntervalInMs,
            EncodingWorkerNum: c.Consistent.EncodingWorkerNum,
            FlushWorkerNum: c.Consistent.FlushWorkerNum,
            Storage: c.Consistent.Storage,
            UseFileBackend: c.Consistent.UseFileBackend,
            Compression: c.Consistent.Compression,
            FlushConcurrency: c.Consistent.FlushConcurrency,
        }
        if c.Consistent.MemoryUsage != nil {
            res.Consistent.MemoryUsage = &config.ConsistentMemoryUsage{
                MemoryQuotaPercentage: c.Consistent.MemoryUsage.MemoryQuotaPercentage,
            }
        }
    }
    if c.Sink != nil {
        var dispatchRules []*config.DispatchRule
        for _, rule := range c.Sink.DispatchRules {
            dispatchRules = append(dispatchRules, &config.DispatchRule{
                Matcher: rule.Matcher,
                DispatcherRule: "",
                PartitionRule: rule.PartitionRule,
                IndexName: rule.IndexName,
                Columns: rule.Columns,
                TopicRule: rule.TopicRule,
            })
        }
        var columnSelectors []*config.ColumnSelector
        for _, selector := range c.Sink.ColumnSelectors {
            columnSelectors = append(columnSelectors, &config.ColumnSelector{
                Matcher: selector.Matcher,
                Columns: selector.Columns,
            })
        }
        var csvConfig *config.CSVConfig
        if c.Sink.CSVConfig != nil {
            csvConfig = &config.CSVConfig{
                Delimiter: c.Sink.CSVConfig.Delimiter,
                Quote: c.Sink.CSVConfig.Quote,
                NullString: c.Sink.CSVConfig.NullString,
                IncludeCommitTs: c.Sink.CSVConfig.IncludeCommitTs,
                BinaryEncodingMethod: c.Sink.CSVConfig.BinaryEncodingMethod,
                OutputOldValue: c.Sink.CSVConfig.OutputOldValue,
                OutputHandleKey: c.Sink.CSVConfig.OutputHandleKey,
            }
        }
        var pulsarConfig *config.PulsarConfig
        if c.Sink.PulsarConfig != nil {
            pulsarConfig = &config.PulsarConfig{
                TLSKeyFilePath: c.Sink.PulsarConfig.TLSKeyFilePath,
                TLSCertificateFile: c.Sink.PulsarConfig.TLSCertificateFile,
                TLSTrustCertsFilePath: c.Sink.PulsarConfig.TLSTrustCertsFilePath,
                PulsarProducerCacheSize: c.Sink.PulsarConfig.PulsarProducerCacheSize,
                PulsarVersion: c.Sink.PulsarConfig.PulsarVersion,
                CompressionType: (*config.PulsarCompressionType)(c.Sink.PulsarConfig.CompressionType),
                AuthenticationToken: c.Sink.PulsarConfig.AuthenticationToken,
                ConnectionTimeout: (*config.TimeSec)(c.Sink.PulsarConfig.ConnectionTimeout),
                OperationTimeout: (*config.TimeSec)(c.Sink.PulsarConfig.OperationTimeout),
                BatchingMaxMessages: c.Sink.PulsarConfig.BatchingMaxMessages,
                BatchingMaxPublishDelay: (*config.TimeMill)(c.Sink.PulsarConfig.BatchingMaxPublishDelay),
                SendTimeout: (*config.TimeSec)(c.Sink.PulsarConfig.SendTimeout),
                TokenFromFile: c.Sink.PulsarConfig.TokenFromFile,
                BasicUserName: c.Sink.PulsarConfig.BasicUserName,
                BasicPassword: c.Sink.PulsarConfig.BasicPassword,
                AuthTLSCertificatePath: c.Sink.PulsarConfig.AuthTLSCertificatePath,
                AuthTLSPrivateKeyPath: c.Sink.PulsarConfig.AuthTLSPrivateKeyPath,
            }
            if c.Sink.PulsarConfig.OAuth2 != nil {
                pulsarConfig.OAuth2 = &config.OAuth2{
                    OAuth2IssuerURL: c.Sink.PulsarConfig.OAuth2.OAuth2IssuerURL,
                    OAuth2Audience: c.Sink.PulsarConfig.OAuth2.OAuth2Audience,
                    OAuth2PrivateKey: c.Sink.PulsarConfig.OAuth2.OAuth2PrivateKey,
                    OAuth2ClientID: c.Sink.PulsarConfig.OAuth2.OAuth2ClientID,
                    OAuth2Scope: c.Sink.PulsarConfig.OAuth2.OAuth2Scope,
                }
            }
        }

        var kafkaConfig *config.KafkaConfig
        if c.Sink.KafkaConfig != nil {
            var codeConfig *config.CodecConfig
            if c.Sink.KafkaConfig.CodecConfig != nil {
                oldConfig := c.Sink.KafkaConfig.CodecConfig
                codeConfig = &config.CodecConfig{
                    EnableTiDBExtension: oldConfig.EnableTiDBExtension,
                    MaxBatchSize: oldConfig.MaxBatchSize,
                    AvroEnableWatermark: oldConfig.AvroEnableWatermark,
                    AvroDecimalHandlingMode: oldConfig.AvroDecimalHandlingMode,
                    AvroBigintUnsignedHandlingMode: oldConfig.AvroBigintUnsignedHandlingMode,
                    EncodingFormat: oldConfig.EncodingFormat,
                }
            }

            var largeMessageHandle *config.LargeMessageHandleConfig
            if c.Sink.KafkaConfig.LargeMessageHandle != nil {
                oldConfig := c.Sink.KafkaConfig.LargeMessageHandle
                largeMessageHandle = &config.LargeMessageHandleConfig{
                    LargeMessageHandleOption: oldConfig.LargeMessageHandleOption,
                    LargeMessageHandleCompression: oldConfig.LargeMessageHandleCompression,
                    ClaimCheckStorageURI: oldConfig.ClaimCheckStorageURI,
                }
            }

            var glueSchemaRegistryConfig *config.GlueSchemaRegistryConfig
            if c.Sink.KafkaConfig.GlueSchemaRegistryConfig != nil {
                glueSchemaRegistryConfig = &config.GlueSchemaRegistryConfig{
                    RegistryName: c.Sink.KafkaConfig.GlueSchemaRegistryConfig.RegistryName,
                    Region: c.Sink.KafkaConfig.GlueSchemaRegistryConfig.Region,
                    AccessKey: c.Sink.KafkaConfig.GlueSchemaRegistryConfig.AccessKey,
                    SecretAccessKey: c.Sink.KafkaConfig.GlueSchemaRegistryConfig.SecretAccessKey,
                    Token: c.Sink.KafkaConfig.GlueSchemaRegistryConfig.Token,
                }
            }

            kafkaConfig = &config.KafkaConfig{
                PartitionNum: c.Sink.KafkaConfig.PartitionNum,
                ReplicationFactor: c.Sink.KafkaConfig.ReplicationFactor,
                KafkaVersion: c.Sink.KafkaConfig.KafkaVersion,
                MaxMessageBytes: c.Sink.KafkaConfig.MaxMessageBytes,
                Compression: c.Sink.KafkaConfig.Compression,
                KafkaClientID: c.Sink.KafkaConfig.KafkaClientID,
                AutoCreateTopic: c.Sink.KafkaConfig.AutoCreateTopic,
                DialTimeout: c.Sink.KafkaConfig.DialTimeout,
                WriteTimeout: c.Sink.KafkaConfig.WriteTimeout,
                ReadTimeout: c.Sink.KafkaConfig.ReadTimeout,
                RequiredAcks: c.Sink.KafkaConfig.RequiredAcks,
                SASLUser: c.Sink.KafkaConfig.SASLUser,
                SASLPassword: c.Sink.KafkaConfig.SASLPassword,
                SASLMechanism: c.Sink.KafkaConfig.SASLMechanism,
                SASLGssAPIAuthType: c.Sink.KafkaConfig.SASLGssAPIAuthType,
                SASLGssAPIKeytabPath: c.Sink.KafkaConfig.SASLGssAPIKeytabPath,
                SASLGssAPIKerberosConfigPath: c.Sink.KafkaConfig.SASLGssAPIKerberosConfigPath,
                SASLGssAPIServiceName: c.Sink.KafkaConfig.SASLGssAPIServiceName,
                SASLGssAPIUser: c.Sink.KafkaConfig.SASLGssAPIUser,
                SASLGssAPIPassword: c.Sink.KafkaConfig.SASLGssAPIPassword,
                SASLGssAPIRealm: c.Sink.KafkaConfig.SASLGssAPIRealm,
                SASLGssAPIDisablePafxfast: c.Sink.KafkaConfig.SASLGssAPIDisablePafxfast,
                SASLOAuthClientID: c.Sink.KafkaConfig.SASLOAuthClientID,
                SASLOAuthClientSecret: c.Sink.KafkaConfig.SASLOAuthClientSecret,
                SASLOAuthTokenURL: c.Sink.KafkaConfig.SASLOAuthTokenURL,
                SASLOAuthScopes: c.Sink.KafkaConfig.SASLOAuthScopes,
                SASLOAuthGrantType: c.Sink.KafkaConfig.SASLOAuthGrantType,
                SASLOAuthAudience: c.Sink.KafkaConfig.SASLOAuthAudience,
                EnableTLS: c.Sink.KafkaConfig.EnableTLS,
                CA: c.Sink.KafkaConfig.CA,
                Cert: c.Sink.KafkaConfig.Cert,
                Key: c.Sink.KafkaConfig.Key,
                InsecureSkipVerify: c.Sink.KafkaConfig.InsecureSkipVerify,
                CodecConfig: codeConfig,
                LargeMessageHandle: largeMessageHandle,
                GlueSchemaRegistryConfig: glueSchemaRegistryConfig,
            }
        }
        var mysqlConfig *config.MySQLConfig
        if c.Sink.MySQLConfig != nil {
            mysqlConfig = &config.MySQLConfig{
                WorkerCount: c.Sink.MySQLConfig.WorkerCount,
                MaxTxnRow: c.Sink.MySQLConfig.MaxTxnRow,
                MaxMultiUpdateRowSize: c.Sink.MySQLConfig.MaxMultiUpdateRowSize,
                MaxMultiUpdateRowCount: c.Sink.MySQLConfig.MaxMultiUpdateRowCount,
                TiDBTxnMode: c.Sink.MySQLConfig.TiDBTxnMode,
                SSLCa: c.Sink.MySQLConfig.SSLCa,
                SSLCert: c.Sink.MySQLConfig.SSLCert,
                SSLKey: c.Sink.MySQLConfig.SSLKey,
                TimeZone: c.Sink.MySQLConfig.TimeZone,
                WriteTimeout: c.Sink.MySQLConfig.WriteTimeout,
                ReadTimeout: c.Sink.MySQLConfig.ReadTimeout,
                Timeout: c.Sink.MySQLConfig.Timeout,
                EnableBatchDML: c.Sink.MySQLConfig.EnableBatchDML,
                EnableMultiStatement: c.Sink.MySQLConfig.EnableMultiStatement,
                EnableCachePreparedStatement: c.Sink.MySQLConfig.EnableCachePreparedStatement,
            }
        }
        var cloudStorageConfig *config.CloudStorageConfig
        if c.Sink.CloudStorageConfig != nil {
            cloudStorageConfig = &config.CloudStorageConfig{
                WorkerCount: c.Sink.CloudStorageConfig.WorkerCount,
                FlushInterval: c.Sink.CloudStorageConfig.FlushInterval,
                FileSize: c.Sink.CloudStorageConfig.FileSize,
                OutputColumnID: c.Sink.CloudStorageConfig.OutputColumnID,
                FileExpirationDays: c.Sink.CloudStorageConfig.FileExpirationDays,
                FileCleanupCronSpec: c.Sink.CloudStorageConfig.FileCleanupCronSpec,
                FlushConcurrency: c.Sink.CloudStorageConfig.FlushConcurrency,
            }
        }
        var debeziumConfig *config.DebeziumConfig
        if c.Sink.DebeziumConfig != nil {
            debeziumConfig = &config.DebeziumConfig{
                OutputOldValue: c.Sink.DebeziumConfig.OutputOldValue,
            }
        }
        var openProtocolConfig *config.OpenProtocolConfig
        if c.Sink.OpenProtocolConfig != nil {
            openProtocolConfig = &config.OpenProtocolConfig{
                OutputOldValue: c.Sink.OpenProtocolConfig.OutputOldValue,
            }
        }

        res.Sink = &config.SinkConfig{
            DispatchRules: dispatchRules,
            Protocol: c.Sink.Protocol,
            CSVConfig: csvConfig,
            ColumnSelectors: columnSelectors,
            SchemaRegistry: c.Sink.SchemaRegistry,
            EncoderConcurrency: c.Sink.EncoderConcurrency,
            Terminator: c.Sink.Terminator,
            DateSeparator: c.Sink.DateSeparator,
            EnablePartitionSeparator: c.Sink.EnablePartitionSeparator,
            FileIndexWidth: c.Sink.FileIndexWidth,
            EnableKafkaSinkV2: c.Sink.EnableKafkaSinkV2,
            OnlyOutputUpdatedColumns: c.Sink.OnlyOutputUpdatedColumns,
            DeleteOnlyOutputHandleKeyColumns: c.Sink.DeleteOnlyOutputHandleKeyColumns,
            ContentCompatible: c.Sink.ContentCompatible,
            KafkaConfig: kafkaConfig,
            MySQLConfig: mysqlConfig,
            PulsarConfig: pulsarConfig,
            CloudStorageConfig: cloudStorageConfig,
            SafeMode: c.Sink.SafeMode,
            OpenProtocol: openProtocolConfig,
            Debezium: debeziumConfig,
        }

        if c.Sink.TxnAtomicity != nil {
            res.Sink.TxnAtomicity = util.AddressOf(config.AtomicityLevel(*c.Sink.TxnAtomicity))
        }
        if c.Sink.AdvanceTimeoutInSec != nil {
            res.Sink.AdvanceTimeoutInSec = util.AddressOf(*c.Sink.AdvanceTimeoutInSec)
        }
        if c.Sink.DebeziumDisableSchema != nil {
            res.Sink.DebeziumDisableSchema = util.AddressOf(*c.Sink.DebeziumDisableSchema)
        }

        if c.Sink.SendBootstrapIntervalInSec != nil {
            res.Sink.SendBootstrapIntervalInSec = util.AddressOf(*c.Sink.SendBootstrapIntervalInSec)
        }

        if c.Sink.SendBootstrapInMsgCount != nil {
            res.Sink.SendBootstrapInMsgCount = util.AddressOf(*c.Sink.SendBootstrapInMsgCount)
        }

        if c.Sink.SendBootstrapToAllPartition != nil {
            res.Sink.SendBootstrapToAllPartition = util.AddressOf(*c.Sink.SendBootstrapToAllPartition)
        }
    }
    if c.Mounter != nil {
        res.Mounter = &config.MounterConfig{
            WorkerNum: c.Mounter.WorkerNum,
        }
    }
    if c.Scheduler != nil {
        res.Scheduler = &config.ChangefeedSchedulerConfig{
            EnableTableAcrossNodes: c.Scheduler.EnableTableAcrossNodes,
            RegionThreshold: c.Scheduler.RegionThreshold,
            WriteKeyThreshold: c.Scheduler.WriteKeyThreshold,
        }
    }
    if c.Integrity != nil {
        res.Integrity = &integrity.Config{
            IntegrityCheckLevel: c.Integrity.IntegrityCheckLevel,
            CorruptionHandleLevel: c.Integrity.CorruptionHandleLevel,
        }
    }
    if c.ChangefeedErrorStuckDuration != nil {
        res.ChangefeedErrorStuckDuration = &c.ChangefeedErrorStuckDuration.duration
    }
    if c.SyncedStatus != nil {
        res.SyncedStatus = &config.SyncedStatusConfig{
            SyncedCheckInterval: c.SyncedStatus.SyncedCheckInterval,
            CheckpointInterval: c.SyncedStatus.CheckpointInterval,
        }
    }
    return res
}
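
// Illustrative sketch, not part of the upstream file: a sparsely populated API
// ReplicaConfig is layered on top of the internal defaults when converted.
func exampleToInternal() *config.ReplicaConfig { // hypothetical helper
    apiCfg := GetDefaultReplicaConfig()
    apiCfg.ForceReplicate = true
    apiCfg.SyncPointInterval = &JSONDuration{duration: 10 * time.Minute}
    return apiCfg.ToInternalReplicaConfig()
}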

// ToAPIReplicaConfig converts *config.ReplicaConfig into *v2.ReplicaConfig
func ToAPIReplicaConfig(c *config.ReplicaConfig) *ReplicaConfig {
    cloned := c.Clone()

    res := &ReplicaConfig{
        MemoryQuota: cloned.MemoryQuota,
        CaseSensitive: cloned.CaseSensitive,
        ForceReplicate: cloned.ForceReplicate,
        IgnoreIneligibleTable: cloned.IgnoreIneligibleTable,
        CheckGCSafePoint: cloned.CheckGCSafePoint,
        EnableSyncPoint: cloned.EnableSyncPoint,
        EnableTableMonitor: cloned.EnableTableMonitor,
        BDRMode: cloned.BDRMode,
    }

    if cloned.SyncPointInterval != nil {
        res.SyncPointInterval = &JSONDuration{*cloned.SyncPointInterval}
    }

    if cloned.SyncPointRetention != nil {
        res.SyncPointRetention = &JSONDuration{*cloned.SyncPointRetention}
    }

    if cloned.Filter != nil {
        var efs []EventFilterRule
        if len(c.Filter.EventFilters) != 0 {
            efs = make([]EventFilterRule, len(c.Filter.EventFilters))
            for i, ef := range c.Filter.EventFilters {
                efs[i] = ToAPIEventFilterRule(ef)
            }
        }

        res.Filter = &FilterConfig{
            Rules: cloned.Filter.Rules,
            IgnoreTxnStartTs: cloned.Filter.IgnoreTxnStartTs,
            EventFilters: efs,
        }
    }
    if cloned.Sink != nil {
        var dispatchRules []*DispatchRule
        for _, rule := range cloned.Sink.DispatchRules {
            dispatchRules = append(dispatchRules, &DispatchRule{
                Matcher: rule.Matcher,
                PartitionRule: rule.PartitionRule,
                IndexName: rule.IndexName,
                Columns: rule.Columns,
                TopicRule: rule.TopicRule,
            })
        }
        var columnSelectors []*ColumnSelector
        for _, selector := range cloned.Sink.ColumnSelectors {
            columnSelectors = append(columnSelectors, &ColumnSelector{
                Matcher: selector.Matcher,
                Columns: selector.Columns,
            })
        }
        var csvConfig *CSVConfig
        if cloned.Sink.CSVConfig != nil {
            csvConfig = &CSVConfig{
                Delimiter: cloned.Sink.CSVConfig.Delimiter,
                Quote: cloned.Sink.CSVConfig.Quote,
                NullString: cloned.Sink.CSVConfig.NullString,
                IncludeCommitTs: cloned.Sink.CSVConfig.IncludeCommitTs,
                BinaryEncodingMethod: cloned.Sink.CSVConfig.BinaryEncodingMethod,
                OutputOldValue: cloned.Sink.CSVConfig.OutputOldValue,
                OutputHandleKey: cloned.Sink.CSVConfig.OutputHandleKey,
            }
        }
        var kafkaConfig *KafkaConfig
        if cloned.Sink.KafkaConfig != nil {
            var codeConfig *CodecConfig
            if cloned.Sink.KafkaConfig.CodecConfig != nil {
                oldConfig := cloned.Sink.KafkaConfig.CodecConfig
                codeConfig = &CodecConfig{
                    EnableTiDBExtension: oldConfig.EnableTiDBExtension,
                    MaxBatchSize: oldConfig.MaxBatchSize,
                    AvroEnableWatermark: oldConfig.AvroEnableWatermark,
                    AvroDecimalHandlingMode: oldConfig.AvroDecimalHandlingMode,
                    AvroBigintUnsignedHandlingMode: oldConfig.AvroBigintUnsignedHandlingMode,
                    EncodingFormat: oldConfig.EncodingFormat,
                }
            }

            var largeMessageHandle *LargeMessageHandleConfig
            if cloned.Sink.KafkaConfig.LargeMessageHandle != nil {
                oldConfig := cloned.Sink.KafkaConfig.LargeMessageHandle
                largeMessageHandle = &LargeMessageHandleConfig{
                    LargeMessageHandleOption: oldConfig.LargeMessageHandleOption,
                    LargeMessageHandleCompression: oldConfig.LargeMessageHandleCompression,
                    ClaimCheckStorageURI: oldConfig.ClaimCheckStorageURI,
                }
            }

            var glueSchemaRegistryConfig *GlueSchemaRegistryConfig
            if cloned.Sink.KafkaConfig.GlueSchemaRegistryConfig != nil {
                glueSchemaRegistryConfig = &GlueSchemaRegistryConfig{
                    RegistryName: cloned.Sink.KafkaConfig.GlueSchemaRegistryConfig.RegistryName,
                    Region: cloned.Sink.KafkaConfig.GlueSchemaRegistryConfig.Region,
                    AccessKey: cloned.Sink.KafkaConfig.GlueSchemaRegistryConfig.AccessKey,
                    SecretAccessKey: cloned.Sink.KafkaConfig.GlueSchemaRegistryConfig.SecretAccessKey,
                    Token: cloned.Sink.KafkaConfig.GlueSchemaRegistryConfig.Token,
                }
            }

            kafkaConfig = &KafkaConfig{
                PartitionNum: cloned.Sink.KafkaConfig.PartitionNum,
                ReplicationFactor: cloned.Sink.KafkaConfig.ReplicationFactor,
                KafkaVersion: cloned.Sink.KafkaConfig.KafkaVersion,
                MaxMessageBytes: cloned.Sink.KafkaConfig.MaxMessageBytes,
                Compression: cloned.Sink.KafkaConfig.Compression,
                KafkaClientID: cloned.Sink.KafkaConfig.KafkaClientID,
                AutoCreateTopic: cloned.Sink.KafkaConfig.AutoCreateTopic,
                DialTimeout: cloned.Sink.KafkaConfig.DialTimeout,
                WriteTimeout: cloned.Sink.KafkaConfig.WriteTimeout,
                ReadTimeout: cloned.Sink.KafkaConfig.ReadTimeout,
                RequiredAcks: cloned.Sink.KafkaConfig.RequiredAcks,
                SASLUser: cloned.Sink.KafkaConfig.SASLUser,
                SASLPassword: cloned.Sink.KafkaConfig.SASLPassword,
                SASLMechanism: cloned.Sink.KafkaConfig.SASLMechanism,
                SASLGssAPIAuthType: cloned.Sink.KafkaConfig.SASLGssAPIAuthType,
                SASLGssAPIKeytabPath: cloned.Sink.KafkaConfig.SASLGssAPIKeytabPath,
                SASLGssAPIKerberosConfigPath: cloned.Sink.KafkaConfig.SASLGssAPIKerberosConfigPath,
                SASLGssAPIServiceName: cloned.Sink.KafkaConfig.SASLGssAPIServiceName,
                SASLGssAPIUser: cloned.Sink.KafkaConfig.SASLGssAPIUser,
                SASLGssAPIPassword: cloned.Sink.KafkaConfig.SASLGssAPIPassword,
                SASLGssAPIRealm: cloned.Sink.KafkaConfig.SASLGssAPIRealm,
                SASLGssAPIDisablePafxfast: cloned.Sink.KafkaConfig.SASLGssAPIDisablePafxfast,
                SASLOAuthClientID: cloned.Sink.KafkaConfig.SASLOAuthClientID,
                SASLOAuthClientSecret: cloned.Sink.KafkaConfig.SASLOAuthClientSecret,
                SASLOAuthTokenURL: cloned.Sink.KafkaConfig.SASLOAuthTokenURL,
                SASLOAuthScopes: cloned.Sink.KafkaConfig.SASLOAuthScopes,
                SASLOAuthGrantType: cloned.Sink.KafkaConfig.SASLOAuthGrantType,
                SASLOAuthAudience: cloned.Sink.KafkaConfig.SASLOAuthAudience,
                EnableTLS: cloned.Sink.KafkaConfig.EnableTLS,
                CA: cloned.Sink.KafkaConfig.CA,
                Cert: cloned.Sink.KafkaConfig.Cert,
                Key: cloned.Sink.KafkaConfig.Key,
                InsecureSkipVerify: cloned.Sink.KafkaConfig.InsecureSkipVerify,
                CodecConfig: codeConfig,
                LargeMessageHandle: largeMessageHandle,
                GlueSchemaRegistryConfig: glueSchemaRegistryConfig,
            }
        }
        var mysqlConfig *MySQLConfig
        if cloned.Sink.MySQLConfig != nil {
            mysqlConfig = &MySQLConfig{
                WorkerCount: cloned.Sink.MySQLConfig.WorkerCount,
                MaxTxnRow: cloned.Sink.MySQLConfig.MaxTxnRow,
                MaxMultiUpdateRowSize: cloned.Sink.MySQLConfig.MaxMultiUpdateRowSize,
                MaxMultiUpdateRowCount: cloned.Sink.MySQLConfig.MaxMultiUpdateRowCount,
                TiDBTxnMode: cloned.Sink.MySQLConfig.TiDBTxnMode,
                SSLCa: cloned.Sink.MySQLConfig.SSLCa,
                SSLCert: cloned.Sink.MySQLConfig.SSLCert,
                SSLKey: cloned.Sink.MySQLConfig.SSLKey,
                TimeZone: cloned.Sink.MySQLConfig.TimeZone,
                WriteTimeout: cloned.Sink.MySQLConfig.WriteTimeout,
                ReadTimeout: cloned.Sink.MySQLConfig.ReadTimeout,
                Timeout: cloned.Sink.MySQLConfig.Timeout,
                EnableBatchDML: cloned.Sink.MySQLConfig.EnableBatchDML,
                EnableMultiStatement: cloned.Sink.MySQLConfig.EnableMultiStatement,
                EnableCachePreparedStatement: cloned.Sink.MySQLConfig.EnableCachePreparedStatement,
            }
        }
        var pulsarConfig *PulsarConfig
        if cloned.Sink.PulsarConfig != nil {
            pulsarConfig = &PulsarConfig{
                TLSKeyFilePath: cloned.Sink.PulsarConfig.TLSKeyFilePath,
                TLSCertificateFile: cloned.Sink.PulsarConfig.TLSCertificateFile,
                TLSTrustCertsFilePath: cloned.Sink.PulsarConfig.TLSTrustCertsFilePath,
                PulsarProducerCacheSize: cloned.Sink.PulsarConfig.PulsarProducerCacheSize,
                PulsarVersion: cloned.Sink.PulsarConfig.PulsarVersion,
                CompressionType: (*string)(cloned.Sink.PulsarConfig.CompressionType),
                AuthenticationToken: cloned.Sink.PulsarConfig.AuthenticationToken,
                ConnectionTimeout: (*int)(cloned.Sink.PulsarConfig.ConnectionTimeout),
                OperationTimeout: (*int)(cloned.Sink.PulsarConfig.OperationTimeout),
                BatchingMaxMessages: cloned.Sink.PulsarConfig.BatchingMaxMessages,
                BatchingMaxPublishDelay: (*int)(cloned.Sink.PulsarConfig.BatchingMaxPublishDelay),
                SendTimeout: (*int)(cloned.Sink.PulsarConfig.SendTimeout),
                TokenFromFile: cloned.Sink.PulsarConfig.TokenFromFile,
                BasicUserName: cloned.Sink.PulsarConfig.BasicUserName,
                BasicPassword: cloned.Sink.PulsarConfig.BasicPassword,
                AuthTLSCertificatePath: cloned.Sink.PulsarConfig.AuthTLSCertificatePath,
                AuthTLSPrivateKeyPath: cloned.Sink.PulsarConfig.AuthTLSPrivateKeyPath,
            }
            if cloned.Sink.PulsarConfig.OAuth2 != nil {
                pulsarConfig.OAuth2 = &PulsarOAuth2{
                    OAuth2IssuerURL: cloned.Sink.PulsarConfig.OAuth2.OAuth2IssuerURL,
                    OAuth2Audience: cloned.Sink.PulsarConfig.OAuth2.OAuth2Audience,
                    OAuth2PrivateKey: cloned.Sink.PulsarConfig.OAuth2.OAuth2PrivateKey,
                    OAuth2ClientID: cloned.Sink.PulsarConfig.OAuth2.OAuth2ClientID,
                    OAuth2Scope: cloned.Sink.PulsarConfig.OAuth2.OAuth2Scope,
                }
            }
        }
        var cloudStorageConfig *CloudStorageConfig
        if cloned.Sink.CloudStorageConfig != nil {
            cloudStorageConfig = &CloudStorageConfig{
                WorkerCount: cloned.Sink.CloudStorageConfig.WorkerCount,
                FlushInterval: cloned.Sink.CloudStorageConfig.FlushInterval,
                FileSize: cloned.Sink.CloudStorageConfig.FileSize,
                OutputColumnID: cloned.Sink.CloudStorageConfig.OutputColumnID,
                FileExpirationDays: cloned.Sink.CloudStorageConfig.FileExpirationDays,
                FileCleanupCronSpec: cloned.Sink.CloudStorageConfig.FileCleanupCronSpec,
                FlushConcurrency: cloned.Sink.CloudStorageConfig.FlushConcurrency,
            }
        }
        var debeziumConfig *DebeziumConfig
        if cloned.Sink.Debezium != nil {
            debeziumConfig = &DebeziumConfig{
                OutputOldValue: cloned.Sink.Debezium.OutputOldValue,
            }
        }
        var openProtocolConfig *OpenProtocolConfig
        if cloned.Sink.OpenProtocol != nil {
            openProtocolConfig = &OpenProtocolConfig{
                OutputOldValue: cloned.Sink.OpenProtocol.OutputOldValue,
            }
        }
        res.Sink = &SinkConfig{
            Protocol: cloned.Sink.Protocol,
            SchemaRegistry: cloned.Sink.SchemaRegistry,
            DispatchRules: dispatchRules,
            CSVConfig: csvConfig,
            ColumnSelectors: columnSelectors,
            EncoderConcurrency: cloned.Sink.EncoderConcurrency,
            Terminator: cloned.Sink.Terminator,
            DateSeparator: cloned.Sink.DateSeparator,
            EnablePartitionSeparator: cloned.Sink.EnablePartitionSeparator,
            FileIndexWidth: cloned.Sink.FileIndexWidth,
            EnableKafkaSinkV2: cloned.Sink.EnableKafkaSinkV2,
            OnlyOutputUpdatedColumns: cloned.Sink.OnlyOutputUpdatedColumns,
            DeleteOnlyOutputHandleKeyColumns: cloned.Sink.DeleteOnlyOutputHandleKeyColumns,
            ContentCompatible: cloned.Sink.ContentCompatible,
            KafkaConfig: kafkaConfig,
            MySQLConfig: mysqlConfig,
            PulsarConfig: pulsarConfig,
            CloudStorageConfig: cloudStorageConfig,
            SafeMode: cloned.Sink.SafeMode,
            DebeziumConfig: debeziumConfig,
            OpenProtocolConfig: openProtocolConfig,
        }

        if cloned.Sink.TxnAtomicity != nil {
            res.Sink.TxnAtomicity = util.AddressOf(string(*cloned.Sink.TxnAtomicity))
        }
        if cloned.Sink.AdvanceTimeoutInSec != nil {
            res.Sink.AdvanceTimeoutInSec = util.AddressOf(*cloned.Sink.AdvanceTimeoutInSec)
        }

        if cloned.Sink.SendBootstrapIntervalInSec != nil {
            res.Sink.SendBootstrapIntervalInSec = util.AddressOf(*cloned.Sink.SendBootstrapIntervalInSec)
        }

        if cloned.Sink.SendBootstrapInMsgCount != nil {
            res.Sink.SendBootstrapInMsgCount = util.AddressOf(*cloned.Sink.SendBootstrapInMsgCount)
        }

        if cloned.Sink.SendBootstrapToAllPartition != nil {
            res.Sink.SendBootstrapToAllPartition = util.AddressOf(*cloned.Sink.SendBootstrapToAllPartition)
        }

        if cloned.Sink.DebeziumDisableSchema != nil {
            res.Sink.DebeziumDisableSchema = util.AddressOf(*cloned.Sink.DebeziumDisableSchema)
        }
    }
    if cloned.Consistent != nil {
        res.Consistent = &ConsistentConfig{
            Level: cloned.Consistent.Level,
            MaxLogSize: cloned.Consistent.MaxLogSize,
            FlushIntervalInMs: cloned.Consistent.FlushIntervalInMs,
            MetaFlushIntervalInMs: cloned.Consistent.MetaFlushIntervalInMs,
            EncodingWorkerNum: c.Consistent.EncodingWorkerNum,
            FlushWorkerNum: c.Consistent.FlushWorkerNum,
            Storage: cloned.Consistent.Storage,
            UseFileBackend: cloned.Consistent.UseFileBackend,
            Compression: cloned.Consistent.Compression,
            FlushConcurrency: cloned.Consistent.FlushConcurrency,
        }
        if cloned.Consistent.MemoryUsage != nil {
            res.Consistent.MemoryUsage = &ConsistentMemoryUsage{
                MemoryQuotaPercentage: cloned.Consistent.MemoryUsage.MemoryQuotaPercentage,
            }
        }
    }

    if cloned.Mounter != nil {
        res.Mounter = &MounterConfig{
            WorkerNum: cloned.Mounter.WorkerNum,
        }
    }
    if cloned.Scheduler != nil {
        res.Scheduler = &ChangefeedSchedulerConfig{
            EnableTableAcrossNodes: cloned.Scheduler.EnableTableAcrossNodes,
            RegionThreshold: cloned.Scheduler.RegionThreshold,
            WriteKeyThreshold: cloned.Scheduler.WriteKeyThreshold,
        }
    }

    if cloned.Integrity != nil {
        res.Integrity = &IntegrityConfig{
            IntegrityCheckLevel: cloned.Integrity.IntegrityCheckLevel,
            CorruptionHandleLevel: cloned.Integrity.CorruptionHandleLevel,
        }
    }
    if cloned.ChangefeedErrorStuckDuration != nil {
        res.ChangefeedErrorStuckDuration = &JSONDuration{*cloned.ChangefeedErrorStuckDuration}
    }
    if cloned.SyncedStatus != nil {
        res.SyncedStatus = &SyncedStatusConfig{
            SyncedCheckInterval: cloned.SyncedStatus.SyncedCheckInterval,
            CheckpointInterval: cloned.SyncedStatus.CheckpointInterval,
        }
    }
    return res
}

// GetDefaultReplicaConfig returns a default ReplicaConfig
func GetDefaultReplicaConfig() *ReplicaConfig {
    return ToAPIReplicaConfig(config.GetDefaultReplicaConfig())
}
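
// Illustrative sketch, not part of the upstream file: the API and internal
// representations carry the same settings, so the internal defaults survive a
// round trip through ToAPIReplicaConfig and ToInternalReplicaConfig.
func exampleRoundTrip() *config.ReplicaConfig { // hypothetical helper
    internal := config.GetDefaultReplicaConfig()
    api := ToAPIReplicaConfig(internal)
    return api.ToInternalReplicaConfig()
}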

// FilterConfig represents filter config for a changefeed
// This is a duplicate of config.FilterConfig
type FilterConfig struct {
    Rules []string `json:"rules,omitempty"`
    IgnoreTxnStartTs []uint64 `json:"ignore_txn_start_ts,omitempty"`
    EventFilters []EventFilterRule `json:"event_filters,omitempty"`
}

// MounterConfig represents mounter config for a changefeed
type MounterConfig struct {
    WorkerNum int `json:"worker_num"`
}

// EventFilterRule is used by sql event filter and expression filter
type EventFilterRule struct {
    Matcher []string `json:"matcher"`
    IgnoreEvent []string `json:"ignore_event"`
    // regular expression
    IgnoreSQL []string `toml:"ignore_sql" json:"ignore_sql"`
    // sql expression
    IgnoreInsertValueExpr string `json:"ignore_insert_value_expr"`
    IgnoreUpdateNewValueExpr string `json:"ignore_update_new_value_expr"`
    IgnoreUpdateOldValueExpr string `json:"ignore_update_old_value_expr"`
    IgnoreDeleteValueExpr string `json:"ignore_delete_value_expr"`
}

// ToInternalEventFilterRule converts EventFilterRule to *config.EventFilterRule
func (e EventFilterRule) ToInternalEventFilterRule() *config.EventFilterRule {
    res := &config.EventFilterRule{
        Matcher: e.Matcher,
        IgnoreSQL: e.IgnoreSQL,
        IgnoreInsertValueExpr: e.IgnoreInsertValueExpr,
        IgnoreUpdateNewValueExpr: e.IgnoreUpdateNewValueExpr,
        IgnoreUpdateOldValueExpr: e.IgnoreUpdateOldValueExpr,
        IgnoreDeleteValueExpr: e.IgnoreDeleteValueExpr,
    }
    if len(e.IgnoreEvent) != 0 {
        res.IgnoreEvent = make([]bf.EventType, len(e.IgnoreEvent))
        for i, et := range e.IgnoreEvent {
            res.IgnoreEvent[i] = bf.EventType(et)
        }
    }
    return res
}

// ToAPIEventFilterRule converts *config.EventFilterRule to API EventFilterRule
func ToAPIEventFilterRule(er *config.EventFilterRule) EventFilterRule {
    res := EventFilterRule{
        IgnoreInsertValueExpr: er.IgnoreInsertValueExpr,
        IgnoreUpdateNewValueExpr: er.IgnoreUpdateNewValueExpr,
        IgnoreUpdateOldValueExpr: er.IgnoreUpdateOldValueExpr,
        IgnoreDeleteValueExpr: er.IgnoreDeleteValueExpr,
    }
    if len(er.Matcher) != 0 {
        res.Matcher = make([]string, len(er.Matcher))
        copy(res.Matcher, er.Matcher)
    }
    if len(er.IgnoreSQL) != 0 {
        res.IgnoreSQL = make([]string, len(er.IgnoreSQL))
        copy(res.IgnoreSQL, er.IgnoreSQL)
    }
    if len(er.IgnoreEvent) != 0 {
        res.IgnoreEvent = make([]string, len(er.IgnoreEvent))
        for i, et := range er.IgnoreEvent {
            res.IgnoreEvent[i] = string(et)
        }
    }
    return res
}
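
// Illustrative sketch, not part of the upstream file: an API EventFilterRule
// that ignores delete events on tables matching test.*, converted into the
// internal rule consumed by the binlog filter (the "delete" event name is an
// assumption based on the binlog-filter event types).
func exampleEventFilter() *config.EventFilterRule { // hypothetical helper
    rule := EventFilterRule{
        Matcher: []string{"test.*"},
        IgnoreEvent: []string{"delete"},
    }
    return rule.ToInternalEventFilterRule()
}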

// Table represents a qualified table name.
type Table struct {
    // Schema is the name of the schema (database) containing this table.
    Schema string `json:"database_name"`
    // Name is the unqualified table name.
    Name string `json:"table_name"`
}

// SinkConfig represents sink config for a changefeed
// This is a duplicate of config.SinkConfig
type SinkConfig struct {
    Protocol *string `json:"protocol,omitempty"`
    SchemaRegistry *string `json:"schema_registry,omitempty"`
    CSVConfig *CSVConfig `json:"csv,omitempty"`
    DispatchRules []*DispatchRule `json:"dispatchers,omitempty"`
    ColumnSelectors []*ColumnSelector `json:"column_selectors,omitempty"`
    TxnAtomicity *string `json:"transaction_atomicity,omitempty"`
    EncoderConcurrency *int `json:"encoder_concurrency,omitempty"`
    Terminator *string `json:"terminator,omitempty"`
    DateSeparator *string `json:"date_separator,omitempty"`
    EnablePartitionSeparator *bool `json:"enable_partition_separator,omitempty"`
    FileIndexWidth *int `json:"file_index_width,omitempty"`
    EnableKafkaSinkV2 *bool `json:"enable_kafka_sink_v2,omitempty"`
    OnlyOutputUpdatedColumns *bool `json:"only_output_updated_columns,omitempty"`
    DeleteOnlyOutputHandleKeyColumns *bool `json:"delete_only_output_handle_key_columns"`
    ContentCompatible *bool `json:"content_compatible"`
    SafeMode *bool `json:"safe_mode,omitempty"`
    KafkaConfig *KafkaConfig `json:"kafka_config,omitempty"`
    PulsarConfig *PulsarConfig `json:"pulsar_config,omitempty"`
    MySQLConfig *MySQLConfig `json:"mysql_config,omitempty"`
    CloudStorageConfig *CloudStorageConfig `json:"cloud_storage_config,omitempty"`
    AdvanceTimeoutInSec *uint `json:"advance_timeout,omitempty"`
    SendBootstrapIntervalInSec *int64 `json:"send_bootstrap_interval_in_sec,omitempty"`
    SendBootstrapInMsgCount *int32 `json:"send_bootstrap_in_msg_count,omitempty"`
    SendBootstrapToAllPartition *bool `json:"send_bootstrap_to_all_partition,omitempty"`
    DebeziumDisableSchema *bool `json:"debezium_disable_schema,omitempty"`
    DebeziumConfig *DebeziumConfig `json:"debezium,omitempty"`
    OpenProtocolConfig *OpenProtocolConfig `json:"open,omitempty"`
}

// CSVConfig denotes the csv config
// This is the same as config.CSVConfig
type CSVConfig struct {
    Delimiter string `json:"delimiter"`
    Quote string `json:"quote"`
    NullString string `json:"null"`
    IncludeCommitTs bool `json:"include_commit_ts"`
    BinaryEncodingMethod string `json:"binary_encoding_method"`
    OutputOldValue bool `json:"output_old_value"`
    OutputHandleKey bool `json:"output_handle_key"`
}

// LargeMessageHandleConfig denotes the large message handling config
// This is the same as config.LargeMessageHandleConfig
type LargeMessageHandleConfig struct {
    LargeMessageHandleOption string `json:"large_message_handle_option"`
    LargeMessageHandleCompression string `json:"large_message_handle_compression"`
    ClaimCheckStorageURI string `json:"claim_check_storage_uri"`
}

// DispatchRule represents partition rule for a table
// This is a duplicate of config.DispatchRule
type DispatchRule struct {
    Matcher []string `json:"matcher,omitempty"`
    PartitionRule string `json:"partition,omitempty"`
    IndexName string `json:"index,omitempty"`
    Columns []string `json:"columns,omitempty"`
    TopicRule string `json:"topic,omitempty"`
}
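
// Illustrative sketch, not part of the upstream file: a SinkConfig whose single
// dispatch rule routes test.* tables to a per-table topic and partitions rows
// by index value (the rule and topic expressions are assumptions based on the
// TiCDC documentation).
func exampleSinkConfig() *SinkConfig { // hypothetical helper
    return &SinkConfig{
        Protocol: util.AddressOf("canal-json"),
        DispatchRules: []*DispatchRule{{
            Matcher: []string{"test.*"},
            PartitionRule: "index-value",
            TopicRule: "{schema}_{table}",
        }},
    }
}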

// ColumnSelector represents a column selector for a table.
// This is a duplicate of config.ColumnSelector
type ColumnSelector struct {
    Matcher []string `json:"matcher,omitempty"`
    Columns []string `json:"columns,omitempty"`
}

// ConsistentConfig represents replication consistency config for a changefeed
// This is a duplicate of config.ConsistentConfig
type ConsistentConfig struct {
    Level string `json:"level,omitempty"`
    MaxLogSize int64 `json:"max_log_size"`
    FlushIntervalInMs int64 `json:"flush_interval"`
    MetaFlushIntervalInMs int64 `json:"meta_flush_interval"`
    EncodingWorkerNum int `json:"encoding_worker_num"`
    FlushWorkerNum int `json:"flush_worker_num"`
    Storage string `json:"storage,omitempty"`
    UseFileBackend bool `json:"use_file_backend"`
    Compression string `json:"compression,omitempty"`
    FlushConcurrency int `json:"flush_concurrency,omitempty"`

    MemoryUsage *ConsistentMemoryUsage `json:"memory_usage"`
}

// ConsistentMemoryUsage represents memory usage of Consistent module.
type ConsistentMemoryUsage struct {
    MemoryQuotaPercentage uint64 `json:"memory_quota_percentage"`
}

// ChangefeedSchedulerConfig is per changefeed scheduler settings.
// This is a duplicate of config.ChangefeedSchedulerConfig
type ChangefeedSchedulerConfig struct {
    // EnableTableAcrossNodes set true to split one table to multiple spans and
    // distribute to multiple TiCDC nodes.
    EnableTableAcrossNodes bool `toml:"enable_table_across_nodes" json:"enable_table_across_nodes"`
    // RegionThreshold is the region count threshold of splitting a table.
    RegionThreshold int `toml:"region_threshold" json:"region_threshold"`
    // WriteKeyThreshold is the written keys threshold of splitting a table.
    WriteKeyThreshold int `toml:"write_key_threshold" json:"write_key_threshold"`
}

// IntegrityConfig is the config for integrity check
// This is a duplicate of Integrity.Config
type IntegrityConfig struct {
    IntegrityCheckLevel string `json:"integrity_check_level"`
    CorruptionHandleLevel string `json:"corruption_handle_level"`
}

// EtcdData contains key/value pair of etcd data
type EtcdData struct {
    Key string `json:"key,omitempty"`
    Value string `json:"value,omitempty"`
}

// ResolveLockReq contains request parameter to resolve lock
type ResolveLockReq struct {
    RegionID uint64 `json:"region_id,omitempty"`
    Ts uint64 `json:"ts,omitempty"`
    PDConfig
}
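
// Illustrative sketch, not part of the upstream file: a TLS-enabled PDConfig
// as it might arrive in a request body, turned into the credential used to
// dial PD (addresses and paths are placeholders).
func examplePDCredential() *security.Credential { // hypothetical helper
    cfg := &PDConfig{
        PDAddrs: []string{"https://pd0:2379"},
        CAPath: "/certs/ca.pem",
        CertPath: "/certs/client.pem",
        KeyPath: "/certs/client-key.pem",
    }
    return cfg.toCredential()
}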

// ChangeFeedInfo describes the detail of a ChangeFeed
type ChangeFeedInfo struct {
    UpstreamID uint64 `json:"upstream_id,omitempty"`
    Namespace string `json:"namespace,omitempty"`
    ID string `json:"id,omitempty"`
    SinkURI string `json:"sink_uri,omitempty"`
    CreateTime time.Time `json:"create_time"`
    // Start sync at this commit ts if `StartTs` is specified; otherwise the
    // CreateTime of the changefeed is used.
    StartTs uint64 `json:"start_ts,omitempty"`
    // The ChangeFeed will exit once it syncs to the timestamp TargetTs
    TargetTs uint64 `json:"target_ts,omitempty"`
    // used for admin job notification, trigger watch event in capture
    AdminJobType model.AdminJobType `json:"admin_job_type,omitempty"`
    Config *ReplicaConfig `json:"config,omitempty"`
    State model.FeedState `json:"state,omitempty"`
    Error *RunningError `json:"error,omitempty"`
    CreatorVersion string `json:"creator_version,omitempty"`

    ResolvedTs uint64 `json:"resolved_ts"`
    CheckpointTs uint64 `json:"checkpoint_ts"`
    CheckpointTime model.JSONTime `json:"checkpoint_time"`
    TaskStatus []model.CaptureTaskStatus `json:"task_status,omitempty"`
}

// SyncedStatus describes the detail of a changefeed's synced status
type SyncedStatus struct {
    Synced bool `json:"synced"`
    SinkCheckpointTs model.JSONTime `json:"sink_checkpoint_ts"`
    PullerResolvedTs model.JSONTime `json:"puller_resolved_ts"`
    LastSyncedTs model.JSONTime `json:"last_synced_ts"`
    NowTs model.JSONTime `json:"now_ts"`
    Info string `json:"info"`
}

// RunningError represents some running error from cdc components,
// such as processor.
type RunningError struct {
    Time *time.Time `json:"time,omitempty"`
    Addr string `json:"addr"`
    Code string `json:"code"`
    Message string `json:"message"`
}

// toCredential generates a security.Credential from a PDConfig
func (cfg *PDConfig) toCredential() *security.Credential {
    credential := &security.Credential{
        CAPath: cfg.CAPath,
        CertPath: cfg.CertPath,
        KeyPath: cfg.KeyPath,
    }
    credential.CertAllowedCN = make([]string, len(cfg.CertAllowedCN))
    copy(credential.CertAllowedCN, cfg.CertAllowedCN)
    return credential
}

// Marshal returns the json marshal format of a ChangeFeedInfo
func (info *ChangeFeedInfo) Marshal() (string, error) {
    data, err := json.Marshal(info)
    return string(data), cerror.WrapError(cerror.ErrMarshalFailed, err)
}

// Clone returns a cloned ChangeFeedInfo
func (info *ChangeFeedInfo) Clone() (*ChangeFeedInfo, error) {
    s, err := info.Marshal()
    if err != nil {
        return nil, err
    }
    cloned := new(ChangeFeedInfo)
    err = cloned.Unmarshal([]byte(s))
    return cloned, err
}

// Unmarshal unmarshals into *ChangeFeedInfo from json marshal byte slice
func (info *ChangeFeedInfo) Unmarshal(data []byte) error {
    err := json.Unmarshal(data, &info)
    if err != nil {
        return errors.Annotatef(
            cerror.WrapError(cerror.ErrUnmarshalFailed, err), "Unmarshal data: %v", data)
    }
    return nil
}
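
// Illustrative sketch, not part of the upstream file: Clone round-trips a
// ChangeFeedInfo through its JSON form, so the copy can be edited without
// touching the original.
func exampleCloneInfo(info *ChangeFeedInfo) (*ChangeFeedInfo, error) { // hypothetical helper
    cloned, err := info.Clone()
    if err != nil {
        return nil, err
    }
    cloned.SinkURI = "blackhole://" // placeholder sink URI; info is unchanged
    return cloned, nil
}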

// UpstreamConfig contains info to connect to pd
type UpstreamConfig struct {
    ID uint64 `json:"id"`
    PDConfig
}

// ProcessorDetail holds the detail info of a processor
type ProcessorDetail struct {
    // All table ids that this processor is replicating.
    Tables []int64 `json:"table_ids"`
}

// Liveness is the liveness status of a capture.
// Liveness can only be changed from alive to stopping, and no way back.
type Liveness int32

// ServerStatus holds some common information of a server
type ServerStatus struct {
    Version string `json:"version"`
    GitHash string `json:"git_hash"`
    ID string `json:"id"`
    ClusterID string `json:"cluster_id"`
    Pid int `json:"pid"`
    IsOwner bool `json:"is_owner"`
    Liveness Liveness `json:"liveness"`
}

// Capture holds common information of a capture in cdc
type Capture struct {
    ID string `json:"id"`
    IsOwner bool `json:"is_owner"`
    AdvertiseAddr string `json:"address"`
    ClusterID string `json:"cluster_id"`
}

// CodecConfig represents a MQ codec configuration
type CodecConfig struct {
    EnableTiDBExtension *bool `json:"enable_tidb_extension,omitempty"`
    MaxBatchSize *int `json:"max_batch_size,omitempty"`
    AvroEnableWatermark *bool `json:"avro_enable_watermark,omitempty"`
    AvroDecimalHandlingMode *string `json:"avro_decimal_handling_mode,omitempty"`
    AvroBigintUnsignedHandlingMode *string `json:"avro_bigint_unsigned_handling_mode,omitempty"`
    EncodingFormat *string `json:"encoding_format,omitempty"`
}

// PulsarConfig represents a pulsar sink configuration
type PulsarConfig struct {
    TLSKeyFilePath *string `json:"tls-certificate-path,omitempty"`
    TLSCertificateFile *string `json:"tls-private-key-path,omitempty"`
    TLSTrustCertsFilePath *string `json:"tls-trust-certs-file-path,omitempty"`
    PulsarProducerCacheSize *int32 `json:"pulsar-producer-cache-size,omitempty"`
    PulsarVersion *string `json:"pulsar-version,omitempty"`
    CompressionType *string `json:"compression-type,omitempty"`
    AuthenticationToken *string `json:"authentication-token,omitempty"`
    ConnectionTimeout *int `json:"connection-timeout,omitempty"`
    OperationTimeout *int `json:"operation-timeout,omitempty"`
    BatchingMaxMessages *uint `json:"batching-max-messages,omitempty"`
    BatchingMaxPublishDelay *int `json:"batching-max-publish-delay,omitempty"`
    SendTimeout *int `json:"send-timeout,omitempty"`
    TokenFromFile *string `json:"token-from-file,omitempty"`
    BasicUserName *string `json:"basic-user-name,omitempty"`
    BasicPassword *string `json:"basic-password,omitempty"`
    AuthTLSCertificatePath *string `json:"auth-tls-certificate-path,omitempty"`
    AuthTLSPrivateKeyPath *string `json:"auth-tls-private-key-path,omitempty"`
    OAuth2 *PulsarOAuth2 `json:"oauth2,omitempty"`
}

// PulsarOAuth2 is the configuration for OAuth2
type PulsarOAuth2 struct {
    OAuth2IssuerURL string `json:"oauth2-issuer-url,omitempty"`
    OAuth2Audience string `json:"oauth2-audience,omitempty"`
    OAuth2PrivateKey string `json:"oauth2-private-key,omitempty"`
    OAuth2ClientID string `json:"oauth2-client-id,omitempty"`
    OAuth2Scope string `json:"oauth2-scope,omitempty"`
}
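
// Illustrative sketch, not part of the upstream file: a PulsarConfig that
// authenticates with a token read from a local file (version and path are
// placeholders).
func examplePulsarConfig() *PulsarConfig { // hypothetical helper
    return &PulsarConfig{
        PulsarVersion: util.AddressOf("2.10.0"),
        TokenFromFile: util.AddressOf("/etc/pulsar/token"),
    }
}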

// KafkaConfig represents a kafka sink configuration
type KafkaConfig struct {
    PartitionNum *int32 `json:"partition_num,omitempty"`
    ReplicationFactor *int16 `json:"replication_factor,omitempty"`
    KafkaVersion *string `json:"kafka_version,omitempty"`
    MaxMessageBytes *int `json:"max_message_bytes,omitempty"`
    Compression *string `json:"compression,omitempty"`
    KafkaClientID *string `json:"kafka_client_id,omitempty"`
    AutoCreateTopic *bool `json:"auto_create_topic,omitempty"`
    DialTimeout *string `json:"dial_timeout,omitempty"`
    WriteTimeout *string `json:"write_timeout,omitempty"`
    ReadTimeout *string `json:"read_timeout,omitempty"`
    RequiredAcks *int `json:"required_acks,omitempty"`
    SASLUser *string `json:"sasl_user,omitempty"`
    SASLPassword *string `json:"sasl_password,omitempty"`
    SASLMechanism *string `json:"sasl_mechanism,omitempty"`
    SASLGssAPIAuthType *string `json:"sasl_gssapi_auth_type,omitempty"`
    SASLGssAPIKeytabPath *string `json:"sasl_gssapi_keytab_path,omitempty"`
    SASLGssAPIKerberosConfigPath *string `json:"sasl_gssapi_kerberos_config_path,omitempty"`
    SASLGssAPIServiceName *string `json:"sasl_gssapi_service_name,omitempty"`
    SASLGssAPIUser *string `json:"sasl_gssapi_user,omitempty"`
    SASLGssAPIPassword *string `json:"sasl_gssapi_password,omitempty"`
    SASLGssAPIRealm *string `json:"sasl_gssapi_realm,omitempty"`
    SASLGssAPIDisablePafxfast *bool `json:"sasl_gssapi_disable_pafxfast,omitempty"`
    SASLOAuthClientID *string `json:"sasl_oauth_client_id,omitempty"`
    SASLOAuthClientSecret *string `json:"sasl_oauth_client_secret,omitempty"`
    SASLOAuthTokenURL *string `json:"sasl_oauth_token_url,omitempty"`
    SASLOAuthScopes []string `json:"sasl_oauth_scopes,omitempty"`
    SASLOAuthGrantType *string `json:"sasl_oauth_grant_type,omitempty"`
    SASLOAuthAudience *string `json:"sasl_oauth_audience,omitempty"`
    EnableTLS *bool `json:"enable_tls,omitempty"`
    CA *string `json:"ca,omitempty"`
    Cert *string `json:"cert,omitempty"`
    Key *string `json:"key,omitempty"`
    InsecureSkipVerify *bool `json:"insecure_skip_verify,omitempty"`
    CodecConfig *CodecConfig `json:"codec_config,omitempty"`
    LargeMessageHandle *LargeMessageHandleConfig `json:"large_message_handle,omitempty"`
    GlueSchemaRegistryConfig *GlueSchemaRegistryConfig `json:"glue_schema_registry_config,omitempty"`
}
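
// Illustrative sketch, not part of the upstream file: a minimal KafkaConfig for
// a SASL/PLAIN cluster; fields left nil fall back to the sink's defaults
// (all values here are placeholders).
func exampleKafkaConfig() *KafkaConfig { // hypothetical helper
    return &KafkaConfig{
        PartitionNum: util.AddressOf(int32(3)),
        ReplicationFactor: util.AddressOf(int16(2)),
        Compression: util.AddressOf("lz4"),
        SASLMechanism: util.AddressOf("plain"),
        SASLUser: util.AddressOf("cdc"),
        SASLPassword: util.AddressOf("secret"),
    }
}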

// MySQLConfig represents a MySQL sink configuration
type MySQLConfig struct {
    WorkerCount *int `json:"worker_count,omitempty"`
    MaxTxnRow *int `json:"max_txn_row,omitempty"`
    MaxMultiUpdateRowSize *int `json:"max_multi_update_row_size,omitempty"`
    MaxMultiUpdateRowCount *int `json:"max_multi_update_row_count,omitempty"`
    TiDBTxnMode *string `json:"tidb_txn_mode,omitempty"`
    SSLCa *string `json:"ssl_ca,omitempty"`
    SSLCert *string `json:"ssl_cert,omitempty"`
    SSLKey *string `json:"ssl_key,omitempty"`
    TimeZone *string `json:"time_zone,omitempty"`
    WriteTimeout *string `json:"write_timeout,omitempty"`
    ReadTimeout *string `json:"read_timeout,omitempty"`
    Timeout *string `json:"timeout,omitempty"`
    EnableBatchDML *bool `json:"enable_batch_dml,omitempty"`
    EnableMultiStatement *bool `json:"enable_multi_statement,omitempty"`
    EnableCachePreparedStatement *bool `json:"enable_cache_prepared_statement,omitempty"`
}

// CloudStorageConfig represents a cloud storage sink configuration
type CloudStorageConfig struct {
    WorkerCount *int `json:"worker_count,omitempty"`
    FlushInterval *string `json:"flush_interval,omitempty"`
    FileSize *int `json:"file_size,omitempty"`
    OutputColumnID *bool `json:"output_column_id,omitempty"`
    FileExpirationDays *int `json:"file_expiration_days,omitempty"`
    FileCleanupCronSpec *string `json:"file_cleanup_cron_spec,omitempty"`
    FlushConcurrency *int `json:"flush_concurrency,omitempty"`
}

// ChangefeedStatus holds common information of a changefeed in cdc
type ChangefeedStatus struct {
    State string `json:"state,omitempty"`
    ResolvedTs uint64 `json:"resolved_ts"`
    CheckpointTs uint64 `json:"checkpoint_ts"`
    LastError *RunningError `json:"last_error,omitempty"`
    LastWarning *RunningError `json:"last_warning,omitempty"`
}

// GlueSchemaRegistryConfig represents a glue schema registry configuration
type GlueSchemaRegistryConfig struct {
    // Name of the schema registry
    RegistryName string `json:"registry_name"`
    // Region of the schema registry
    Region string `json:"region"`
    // AccessKey of the schema registry
    AccessKey string `json:"access_key,omitempty"`
    // SecretAccessKey of the schema registry
    SecretAccessKey string `json:"secret_access_key,omitempty"`
    Token string `json:"token,omitempty"`
}

// OpenProtocolConfig represents the configurations for open protocol encoding
type OpenProtocolConfig struct {
    OutputOldValue bool `json:"output_old_value"`
}

// DebeziumConfig represents the configurations for debezium protocol encoding
type DebeziumConfig struct {
    OutputOldValue bool `json:"output_old_value"`
}