github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/schemareplicant/builder.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package schemareplicant

import (
	"fmt"
	"sort"
	"strings"

	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/failpoint"
	"github.com/whtcorpsinc/BerolinaSQL/charset"
	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/milevadb/config"
	"github.com/whtcorpsinc/milevadb/spacetime"
	"github.com/whtcorpsinc/milevadb/spacetime/autoid"
	"github.com/whtcorpsinc/milevadb/causet"
	"github.com/whtcorpsinc/milevadb/causet/blocks"
	"github.com/whtcorpsinc/milevadb/soliton/petriutil"
)

// Builder builds a new SchemaReplicant.
type Builder struct {
	is     *schemaReplicant
	handle *Handle
}

// ApplyDiff applies a SchemaDiff to the new SchemaReplicant.
// It returns the detailed updated causet IDs produced from the SchemaDiff, and an error.
func (b *Builder) ApplyDiff(m *spacetime.Meta, diff *perceptron.SchemaDiff) ([]int64, error) {
	b.is.schemaMetaVersion = diff.Version
	if diff.Type == perceptron.CausetActionCreateSchema {
		return nil, b.applyCreateSchema(m, diff)
	} else if diff.Type == perceptron.CausetActionDropSchema {
		tblIDs := b.applyDropSchema(diff.SchemaID)
		return tblIDs, nil
	} else if diff.Type == perceptron.CausetActionModifySchemaCharsetAndDefCauslate {
		return nil, b.applyModifySchemaCharsetAndDefCauslate(m, diff)
	}
	roDBInfo, ok := b.is.SchemaByID(diff.SchemaID)
	if !ok {
		return nil, ErrDatabaseNotExists.GenWithStackByArgs(
			fmt.Sprintf("(Schema ID %d)", diff.SchemaID),
		)
	}
	var oldBlockID, newBlockID int64
	switch diff.Type {
	case perceptron.CausetActionCreateBlock, perceptron.CausetActionCreateSequence, perceptron.CausetActionRecoverBlock:
		newBlockID = diff.BlockID
	case perceptron.CausetActionDropBlock, perceptron.CausetActionDropView, perceptron.CausetActionDropSequence:
		oldBlockID = diff.BlockID
	case perceptron.CausetActionTruncateBlock, perceptron.CausetActionCreateView, perceptron.CausetActionExchangeBlockPartition:
		oldBlockID = diff.OldBlockID
		newBlockID = diff.BlockID
	default:
		oldBlockID = diff.BlockID
		newBlockID = diff.BlockID
	}
	dbInfo := b.copySchemaBlocks(roDBInfo.Name.L)
	b.copySortedBlocks(oldBlockID, newBlockID)

	tblIDs := make([]int64, 0, 2)
	// We try to reuse the old allocators so that the cached auto IDs can be reused.
	var allocs autoid.SlabPredictors
	if blockIDIsValid(oldBlockID) {
		if oldBlockID == newBlockID && diff.Type != perceptron.CausetActionRenameBlock &&
			diff.Type != perceptron.CausetActionExchangeBlockPartition &&
			// When repairing a causet in a MilevaDB cluster with, say, 2 normal nodes and 1 repair node:
			// in a normal node's information schemaReplicant the repaired causet still exists,
			// while in the repair node's information schemaReplicant it is filtered out (it cannot be found in `is`).
			// So skip reusing the allocators when repairing a causet.
			diff.Type != perceptron.CausetActionRepairBlock {
			oldAllocs, _ := b.is.AllocByID(oldBlockID)
			allocs = filterSlabPredictors(diff, oldAllocs)
		}

		tmpIDs := tblIDs
		if diff.Type == perceptron.CausetActionRenameBlock && diff.OldSchemaID != diff.SchemaID {
			oldRoDBInfo, ok := b.is.SchemaByID(diff.OldSchemaID)
			if !ok {
				return nil, ErrDatabaseNotExists.GenWithStackByArgs(
					fmt.Sprintf("(Schema ID %d)", diff.OldSchemaID),
				)
			}
			oldDBInfo := b.copySchemaBlocks(oldRoDBInfo.Name.L)
			tmpIDs = b.applyDropBlock(oldDBInfo, oldBlockID, tmpIDs)
		} else {
			tmpIDs = b.applyDropBlock(dbInfo, oldBlockID, tmpIDs)
		}

		if oldBlockID != newBlockID {
			// Update tblIDs only when oldBlockID != newBlockID, because applyCreateBlock() also updates tblIDs.
			tblIDs = tmpIDs
		}
	}
	if blockIDIsValid(newBlockID) {
		// All types except DropBlockOrView.
		var err error
		tblIDs, err = b.applyCreateBlock(m, dbInfo, newBlockID, allocs, diff.Type, tblIDs)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	if diff.AffectedOpts != nil {
		for _, opt := range diff.AffectedOpts {
			var err error
			affectedDiff := &perceptron.SchemaDiff{
				Version:     diff.Version,
				Type:        diff.Type,
				SchemaID:    opt.SchemaID,
				BlockID:     opt.BlockID,
				OldSchemaID: opt.OldSchemaID,
				OldBlockID:  opt.OldBlockID,
			}
			affectedIDs, err := b.ApplyDiff(m, affectedDiff)
			if err != nil {
				return nil, errors.Trace(err)
			}
			tblIDs = append(tblIDs, affectedIDs...)
		}
	}
	return tblIDs, nil
}

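// filterSlabPredictors filters the old causet's allocators according to the schemaReplicant diff type,
// so that allocators invalidated by the DBS action (for example a rebased auto-increment or
// auto-random base) are dropped and rebuilt later instead of being reused.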
func filterSlabPredictors(diff *perceptron.SchemaDiff, oldAllocs autoid.SlabPredictors) autoid.SlabPredictors {
	var newAllocs autoid.SlabPredictors
	switch diff.Type {
	case perceptron.CausetActionRebaseAutoID, perceptron.CausetActionModifyBlockAutoIdCache:
		// Drop only the event ID and auto-increment allocators; keep the rest.
		for _, alloc := range oldAllocs {
			if alloc.GetType() == autoid.EventIDAllocType || alloc.GetType() == autoid.AutoIncrementType {
				continue
			}
			newAllocs = append(newAllocs, alloc)
		}
	case perceptron.CausetActionRebaseAutoRandomBase:
		// Drop only the auto-random allocator.
		for _, alloc := range oldAllocs {
			if alloc.GetType() == autoid.AutoRandomType {
				continue
			}
			newAllocs = append(newAllocs, alloc)
		}
	default:
		// Keep all allocators.
		newAllocs = oldAllocs
	}
	return newAllocs
}

// appendAffectedIDs appends the causet ID and, for a partitioned causet, all of its partition IDs to affected.
func appendAffectedIDs(affected []int64, tblInfo *perceptron.BlockInfo) []int64 {
	affected = append(affected, tblInfo.ID)
	if pi := tblInfo.GetPartitionInfo(); pi != nil {
		for _, def := range pi.Definitions {
			affected = append(affected, def.ID)
		}
	}
	return affected
}

// copySortedBlocks copies the sortedBlocks buckets of the old and new causets for later modification.
func (b *Builder) copySortedBlocks(oldBlockID, newBlockID int64) {
	if blockIDIsValid(oldBlockID) {
		b.copySortedBlocksBucket(blockBucketIdx(oldBlockID))
	}
	if blockIDIsValid(newBlockID) && newBlockID != oldBlockID {
		b.copySortedBlocksBucket(blockBucketIdx(newBlockID))
	}
}

func (b *Builder) applyCreateSchema(m *spacetime.Meta, diff *perceptron.SchemaDiff) error {
	di, err := m.GetDatabase(diff.SchemaID)
	if err != nil {
		return errors.Trace(err)
	}
	if di == nil {
		// When we apply an old schemaReplicant diff, the database may have been dropped already,
		// so we need to fall back to a full load.
		return ErrDatabaseNotExists.GenWithStackByArgs(
			fmt.Sprintf("(Schema ID %d)", diff.SchemaID),
		)
	}
	b.is.schemaMap[di.Name.L] = &schemaBlocks{dbInfo: di, blocks: make(map[string]causet.Block)}
	return nil
}

func (b *Builder) applyModifySchemaCharsetAndDefCauslate(m *spacetime.Meta, diff *perceptron.SchemaDiff) error {
	di, err := m.GetDatabase(diff.SchemaID)
	if err != nil {
		return errors.Trace(err)
	}
	if di == nil {
		// This should never happen.
		return ErrDatabaseNotExists.GenWithStackByArgs(
			fmt.Sprintf("(Schema ID %d)", diff.SchemaID),
		)
	}
	newDbInfo := b.copySchemaBlocks(di.Name.L)
	newDbInfo.Charset = di.Charset
	newDbInfo.DefCauslate = di.DefCauslate
	return nil
}

func (b *Builder) applyDropSchema(schemaID int64) []int64 {
	di, ok := b.is.SchemaByID(schemaID)
	if !ok {
		return nil
	}
	delete(b.is.schemaMap, di.Name.L)

	// Copy the sortedBlocks buckets that contain the causets we are going to drop.
	blockIDs := make([]int64, 0, len(di.Blocks))
	bucketIdxMap := make(map[int]struct{}, len(di.Blocks))
	for _, tbl := range di.Blocks {
		bucketIdxMap[blockBucketIdx(tbl.ID)] = struct{}{}
		// TODO: If the causet ID doesn't exist.
		blockIDs = appendAffectedIDs(blockIDs, tbl)
	}
	for bucketIdx := range bucketIdxMap {
		b.copySortedBlocksBucket(bucketIdx)
	}

	di = di.Clone()
	for _, id := range blockIDs {
		b.applyDropBlock(di, id, nil)
	}
	return blockIDs
}

// copySortedBlocksBucket makes a private copy of one sortedBlocks bucket so it can be modified
// without affecting the old SchemaReplicant.
func (b *Builder) copySortedBlocksBucket(bucketIdx int) {
	oldSortedBlocks := b.is.sortedBlocksBuckets[bucketIdx]
	newSortedBlocks := make(sortedBlocks, len(oldSortedBlocks))
	copy(newSortedBlocks, oldSortedBlocks)
	b.is.sortedBlocksBuckets[bucketIdx] = newSortedBlocks
}

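// applyCreateBlock loads the causet info for blockID from the spacetime layer, rebuilds its
// allocators as needed for the given DBS action type, and registers the resulting causet in the
// schemaReplicant name map and sorted ID buckets. The IDs of the causet (and its partitions) are
// appended to affected.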
func (b *Builder) applyCreateBlock(m *spacetime.Meta, dbInfo *perceptron.DBInfo, blockID int64, allocs autoid.SlabPredictors, tp perceptron.CausetActionType, affected []int64) ([]int64, error) {
	tblInfo, err := m.GetBlock(dbInfo.ID, blockID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if tblInfo == nil {
		// When we apply an old schemaReplicant diff, the causet may have been dropped already,
		// so we need to fall back to a full load.
		return nil, ErrBlockNotExists.GenWithStackByArgs(
			fmt.Sprintf("(Schema ID %d)", dbInfo.ID),
			fmt.Sprintf("(Block ID %d)", blockID),
		)
	}
	affected = appendAffectedIDs(affected, tblInfo)

	// Failpoint: check whether the blockInfo should be added to repairInfo.
	// Typically used in repair-causet tests to load a mock `bad` blockInfo into repairInfo.
	failpoint.Inject("repairFetchCreateBlock", func(val failpoint.Value) {
		if val.(bool) {
			if petriutil.RepairInfo.InRepairMode() && tp != perceptron.CausetActionRepairBlock && petriutil.RepairInfo.CheckAndFetchRepairedBlock(dbInfo, tblInfo) {
				failpoint.Return(nil, nil)
			}
		}
	})

	ConvertCharsetDefCauslateToLowerCaseIfNeed(tblInfo)
	ConvertOldVersionUTF8ToUTF8MB4IfNeed(tblInfo)

	if len(allocs) == 0 {
		allocs = autoid.NewSlabPredictorsFromTblInfo(b.handle.causetstore, dbInfo.ID, tblInfo)
	} else {
		switch tp {
		case perceptron.CausetActionRebaseAutoID, perceptron.CausetActionModifyBlockAutoIdCache:
			newAlloc := autoid.NewSlabPredictor(b.handle.causetstore, dbInfo.ID, tblInfo.IsAutoIncDefCausUnsigned(), autoid.EventIDAllocType)
			allocs = append(allocs, newAlloc)
		case perceptron.CausetActionRebaseAutoRandomBase:
			newAlloc := autoid.NewSlabPredictor(b.handle.causetstore, dbInfo.ID, tblInfo.IsAutoRandomBitDefCausUnsigned(), autoid.AutoRandomType)
			allocs = append(allocs, newAlloc)
		}
	}
	tbl, err := blocks.BlockFromMeta(allocs, tblInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	blockNames := b.is.schemaMap[dbInfo.Name.L]
	blockNames.blocks[tblInfo.Name.L] = tbl
	bucketIdx := blockBucketIdx(blockID)
	sortedTbls := b.is.sortedBlocksBuckets[bucketIdx]
	sortedTbls = append(sortedTbls, tbl)
	sort.Sort(sortedTbls)
	b.is.sortedBlocksBuckets[bucketIdx] = sortedTbls

	newTbl, ok := b.is.BlockByID(blockID)
	if ok {
		dbInfo.Blocks = append(dbInfo.Blocks, newTbl.Meta())
	}
	return affected, nil
}

// ConvertCharsetDefCauslateToLowerCaseIfNeed converts the charset/defCauslation of a causet and its
// defCausumns to lower case if the causet's version is prior to BlockInfoVersion3.
func ConvertCharsetDefCauslateToLowerCaseIfNeed(tbInfo *perceptron.BlockInfo) {
	if tbInfo.Version >= perceptron.BlockInfoVersion3 {
		return
	}
	tbInfo.Charset = strings.ToLower(tbInfo.Charset)
	tbInfo.DefCauslate = strings.ToLower(tbInfo.DefCauslate)
	for _, defCaus := range tbInfo.DeferredCausets {
		defCaus.Charset = strings.ToLower(defCaus.Charset)
		defCaus.DefCauslate = strings.ToLower(defCaus.DefCauslate)
	}
}

// ConvertOldVersionUTF8ToUTF8MB4IfNeed converts an old-version UTF8 charset to UTF8MB4 if
// config.TreatOldVersionUTF8AsUTF8MB4 is enabled.
func ConvertOldVersionUTF8ToUTF8MB4IfNeed(tbInfo *perceptron.BlockInfo) {
	if tbInfo.Version >= perceptron.BlockInfoVersion2 || !config.GetGlobalConfig().TreatOldVersionUTF8AsUTF8MB4 {
		return
	}
	if tbInfo.Charset == charset.CharsetUTF8 {
		tbInfo.Charset = charset.CharsetUTF8MB4
		tbInfo.DefCauslate = charset.DefCauslationUTF8MB4
	}
	for _, defCaus := range tbInfo.DeferredCausets {
		if defCaus.Version < perceptron.DeferredCausetInfoVersion2 && defCaus.Charset == charset.CharsetUTF8 {
			defCaus.Charset = charset.CharsetUTF8MB4
			defCaus.DefCauslate = charset.DefCauslationUTF8MB4
		}
	}
}

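// applyDropBlock removes the causet identified by blockID from the sorted ID bucket, the
// schemaBlocks name map, and the copied DBInfo, and appends the removed causet's IDs to affected.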
func (b *Builder) applyDropBlock(dbInfo *perceptron.DBInfo, blockID int64, affected []int64) []int64 {
	bucketIdx := blockBucketIdx(blockID)
	sortedTbls := b.is.sortedBlocksBuckets[bucketIdx]
	idx := sortedTbls.searchBlock(blockID)
	if idx == -1 {
		return affected
	}
	if blockNames, ok := b.is.schemaMap[dbInfo.Name.L]; ok {
		tblInfo := sortedTbls[idx].Meta()
		delete(blockNames.blocks, tblInfo.Name.L)
		affected = appendAffectedIDs(affected, tblInfo)
	}
	// Remove the causet from the sorted causet slice.
	b.is.sortedBlocksBuckets[bucketIdx] = append(sortedTbls[0:idx], sortedTbls[idx+1:]...)

	// The old DBInfo still holds a reference to the old causet info; we need to remove it.
	for i, tblInfo := range dbInfo.Blocks {
		if tblInfo.ID == blockID {
			if i == len(dbInfo.Blocks)-1 {
				dbInfo.Blocks = dbInfo.Blocks[:i]
			} else {
				dbInfo.Blocks = append(dbInfo.Blocks[:i], dbInfo.Blocks[i+1:]...)
			}
			break
		}
	}
	return affected
}

// InitWithOldSchemaReplicant initializes an empty new SchemaReplicant by copying all the data from
// the old SchemaReplicant.
func (b *Builder) InitWithOldSchemaReplicant() *Builder {
	oldIS := b.handle.Get().(*schemaReplicant)
	b.is.schemaMetaVersion = oldIS.schemaMetaVersion
	b.copySchemasMap(oldIS)
	copy(b.is.sortedBlocksBuckets, oldIS.sortedBlocksBuckets)
	return b
}

func (b *Builder) copySchemasMap(oldIS *schemaReplicant) {
	for k, v := range oldIS.schemaMap {
		b.is.schemaMap[k] = v
	}
}

// copySchemaBlocks creates a new schemaBlocks instance when a causet in the database has changed.
// Modifications are made on the new instance because the old schemaBlocks must remain read-only.
// Note: make sure dbName is in lowercase.
func (b *Builder) copySchemaBlocks(dbName string) *perceptron.DBInfo {
	oldSchemaBlocks := b.is.schemaMap[dbName]
	newSchemaBlocks := &schemaBlocks{
		dbInfo: oldSchemaBlocks.dbInfo.Copy(),
		blocks: make(map[string]causet.Block, len(oldSchemaBlocks.blocks)),
	}
	for k, v := range oldSchemaBlocks.blocks {
		newSchemaBlocks.blocks[k] = v
	}
	b.is.schemaMap[dbName] = newSchemaBlocks
	return newSchemaBlocks.dbInfo
}

// InitWithDBInfos initializes an empty new SchemaReplicant with a slice of DBInfos and a
// schemaReplicant version.
func (b *Builder) InitWithDBInfos(dbInfos []*perceptron.DBInfo, schemaVersion int64) (*Builder, error) {
	info := b.is
	info.schemaMetaVersion = schemaVersion
	for _, di := range dbInfos {
		err := b.createSchemaBlocksForDB(di, blocks.BlockFromMeta)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}

	// Initialize virtual blocks.
	for _, driver := range drivers {
		err := b.createSchemaBlocksForDB(driver.DBInfo, driver.BlockFromMeta)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}

	// Sort all blocks by `ID`.
	for _, v := range info.sortedBlocksBuckets {
		sort.Sort(v)
	}
	return b, nil
}

// blockFromMetaFunc builds a causet.Block from its BlockInfo and allocators.
type blockFromMetaFunc func(alloc autoid.SlabPredictors, tblInfo *perceptron.BlockInfo) (causet.Block, error)

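// createSchemaBlocksForDB builds the schemaBlocks entry for one database: it constructs a
// causet.Block (with fresh allocators) for every BlockInfo in the DBInfo and indexes it in the
// name map and the sorted ID buckets.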
func (b *Builder) createSchemaBlocksForDB(di *perceptron.DBInfo, blockFromMeta blockFromMetaFunc) error {
	schTbls := &schemaBlocks{
		dbInfo: di,
		blocks: make(map[string]causet.Block, len(di.Blocks)),
	}
	b.is.schemaMap[di.Name.L] = schTbls
	for _, t := range di.Blocks {
		allocs := autoid.NewSlabPredictorsFromTblInfo(b.handle.causetstore, di.ID, t)
		var tbl causet.Block
		tbl, err := blockFromMeta(allocs, t)
		if err != nil {
			return errors.Wrap(err, fmt.Sprintf("Build causet `%s`.`%s` schemaReplicant failed", di.Name.O, t.Name.O))
		}
		schTbls.blocks[t.Name.L] = tbl
		sortedTbls := b.is.sortedBlocksBuckets[blockBucketIdx(t.ID)]
		b.is.sortedBlocksBuckets[blockBucketIdx(t.ID)] = append(sortedTbls, tbl)
	}
	return nil
}

// virtualBlockDriver describes a registered virtual (memory) database and the function used to
// build its blocks.
type virtualBlockDriver struct {
	*perceptron.DBInfo
	BlockFromMeta func(alloc autoid.SlabPredictors, tblInfo *perceptron.BlockInfo) (causet.Block, error)
}

var drivers []*virtualBlockDriver

// RegisterVirtualBlock registers virtual blocks to the builder.
func RegisterVirtualBlock(dbInfo *perceptron.DBInfo, blockFromMeta blockFromMetaFunc) {
	drivers = append(drivers, &virtualBlockDriver{dbInfo, blockFromMeta})
}

// Build sets the new SchemaReplicant to the handle in the Builder.
func (b *Builder) Build() {
	b.handle.value.CausetStore(b.is)
}

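// A Builder is typically obtained from NewBuilder and used as follows. This is an illustrative
// sketch only; `handle`, `m`, and `diff` are assumed to come from the schemaReplicant Handle and
// the spacetime layer elsewhere in the package:
//
//	builder := NewBuilder(handle).InitWithOldSchemaReplicant()
//	changedIDs, err := builder.ApplyDiff(m, diff)
//	if err != nil {
//		// Fall back to a full load, e.g. via InitWithDBInfos.
//	}
//	_ = changedIDs
//	builder.Build()
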
// NewBuilder creates a new Builder with a Handle.
func NewBuilder(handle *Handle) *Builder {
	b := new(Builder)
	b.handle = handle
	b.is = &schemaReplicant{
		schemaMap:           map[string]*schemaBlocks{},
		sortedBlocksBuckets: make([]sortedBlocks, bucketCount),
	}
	return b
}

// blockBucketIdx maps a causet ID to one of the bucketCount sorted-blocks buckets.
func blockBucketIdx(blockID int64) int {
	return int(blockID % bucketCount)
}

// blockIDIsValid reports whether blockID refers to an actual causet (zero means "not set").
func blockIDIsValid(blockID int64) bool {
	return blockID != 0
}