github.com/matrixorigin/matrixone@v0.7.0/pkg/vm/engine/disttae/db.go

// Copyright 2022 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package disttae

import (
	"context"

	"github.com/matrixorigin/matrixone/pkg/catalog"
	"github.com/matrixorigin/matrixone/pkg/common/mpool"
	"github.com/matrixorigin/matrixone/pkg/container/batch"
	"github.com/matrixorigin/matrixone/pkg/container/vector"
	"github.com/matrixorigin/matrixone/pkg/pb/timestamp"
	"github.com/matrixorigin/matrixone/pkg/txn/client"
	"github.com/matrixorigin/matrixone/pkg/vm/engine/disttae/cache"
)

func newDB(dnList []DNStore) *DB {
	dnMap := make(map[string]int)
	for i := range dnList {
		dnMap[dnList[i].UUID] = i
	}
	db := &DB{
		dnMap:      dnMap,
		metaTables: make(map[string]Partitions),
		partitions: make(map[[2]uint64]Partitions),
	}
	return db
}

// init is used to insert some data that will not be synchronized by logtail.
func (db *DB) init(ctx context.Context, m *mpool.MPool, catalogCache *cache.CatalogCache) error {
	db.Lock()
	defer db.Unlock()
	{
		parts := make(Partitions, len(db.dnMap))
		for i := range parts {
			parts[i] = NewPartition(nil)
		}
		db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_DATABASE_ID}] = parts
	}
	{
		parts := make(Partitions, len(db.dnMap))
		for i := range parts {
			parts[i] = NewPartition(nil)
		}
		db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_TABLES_ID}] = parts
	}
	{
		parts := make(Partitions, len(db.dnMap))
		for i := range parts {
			parts[i] = NewPartition(nil)
		}
		db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_COLUMNS_ID}] = parts
	}
	{ // mo_catalog
		part := db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_DATABASE_ID}][0]
		bat, err := genCreateDatabaseTuple("", 0, 0, 0, catalog.MO_CATALOG, catalog.MO_CATALOG_ID, m)
		if err != nil {
			return err
		}
		ibat, err := genInsertBatch(bat, m)
		if err != nil {
			bat.Clean(m)
			return err
		}
		if err := part.Insert(ctx, MO_PRIMARY_OFF, ibat, false); err != nil {
			bat.Clean(m)
			return err
		}
		catalogCache.InsertDatabase(bat)
		bat.Clean(m)
	}
	{ // mo_database
		part := db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_TABLES_ID}][0]
		cols, err := genColumns(0, catalog.MO_DATABASE, catalog.MO_CATALOG, catalog.MO_DATABASE_ID,
			catalog.MO_CATALOG_ID, catalog.MoDatabaseTableDefs)
		if err != nil {
			return err
		}
		bat, err := genCreateTableTuple(new(table), "", 0, 0, 0,
			catalog.MO_DATABASE, catalog.MO_DATABASE_ID,
			catalog.MO_CATALOG_ID, catalog.MO_CATALOG, catalog.SystemOrdinaryRel, m)
		if err != nil {
			return err
		}
		ibat, err := genInsertBatch(bat, m)
		if err != nil {
			bat.Clean(m)
			return err
		}
		if err := part.Insert(ctx, MO_PRIMARY_OFF+catalog.MO_TABLES_REL_ID_IDX, ibat, false); err != nil {
			bat.Clean(m)
			return err
		}
		catalogCache.InsertTable(bat)
		bat.Clean(m)
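		// The column tuples of mo_database are accumulated into a fresh batch laid out
		// by MoColumnsSchema and inserted into the mo_columns partition and the catalog
		// cache; the mo_tables and mo_columns blocks below repeat the same pattern.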
		part = db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_COLUMNS_ID}][0]
		bat = batch.NewWithSize(len(catalog.MoColumnsSchema))
		bat.Attrs = append(bat.Attrs, catalog.MoColumnsSchema...)
		bat.SetZs(len(cols), m)
		for _, col := range cols {
			bat0, err := genCreateColumnTuple(col, m)
			if err != nil {
				return err
			}
			if bat.Vecs[0] == nil {
				for i, vec := range bat0.Vecs {
					bat.Vecs[i] = vector.New(vec.GetType())
				}
			}
			for i, vec := range bat0.Vecs {
				if err := vector.UnionOne(bat.Vecs[i], vec, 0, m); err != nil {
					bat.Clean(m)
					bat0.Clean(m)
					return err
				}
			}
			bat0.Clean(m)
		}
		ibat, err = genInsertBatch(bat, m)
		if err != nil {
			bat.Clean(m)
			return err
		}
		if err := part.Insert(ctx, MO_PRIMARY_OFF+catalog.MO_COLUMNS_ATT_UNIQ_NAME_IDX,
			ibat, false); err != nil {
			bat.Clean(m)
			return err
		}
		catalogCache.InsertColumns(bat)
		bat.Clean(m)
	}
	{ // mo_tables
		part := db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_TABLES_ID}][0]
		cols, err := genColumns(0, catalog.MO_TABLES, catalog.MO_CATALOG, catalog.MO_TABLES_ID,
			catalog.MO_CATALOG_ID, catalog.MoTablesTableDefs)
		if err != nil {
			return err
		}
		bat, err := genCreateTableTuple(new(table), "", 0, 0, 0, catalog.MO_TABLES, catalog.MO_TABLES_ID,
			catalog.MO_CATALOG_ID, catalog.MO_CATALOG, catalog.SystemOrdinaryRel, m)
		if err != nil {
			return err
		}
		ibat, err := genInsertBatch(bat, m)
		if err != nil {
			bat.Clean(m)
			return err
		}
		if err := part.Insert(ctx, MO_PRIMARY_OFF+catalog.MO_TABLES_REL_ID_IDX, ibat, false); err != nil {
			bat.Clean(m)
			return err
		}
		catalogCache.InsertTable(bat)
		bat.Clean(m)
		part = db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_COLUMNS_ID}][0]
		bat = batch.NewWithSize(len(catalog.MoColumnsSchema))
		bat.Attrs = append(bat.Attrs, catalog.MoColumnsSchema...)
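		// Each column definition becomes a single-row batch via genCreateColumnTuple;
		// bat.Vecs is lazily initialized from the first tuple's types and every row is
		// appended with vector.UnionOne. SetZs appears to preallocate the per-row
		// counts for len(cols) rows.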
		bat.SetZs(len(cols), m)
		for _, col := range cols {
			bat0, err := genCreateColumnTuple(col, m)
			if err != nil {
				return err
			}
			if bat.Vecs[0] == nil {
				for i, vec := range bat0.Vecs {
					bat.Vecs[i] = vector.New(vec.GetType())
				}
			}
			for i, vec := range bat0.Vecs {
				if err := vector.UnionOne(bat.Vecs[i], vec, 0, m); err != nil {
					bat.Clean(m)
					bat0.Clean(m)
					return err
				}
			}
			bat0.Clean(m)
		}
		ibat, err = genInsertBatch(bat, m)
		if err != nil {
			bat.Clean(m)
			return err
		}
		if err := part.Insert(ctx, MO_PRIMARY_OFF+catalog.MO_COLUMNS_ATT_UNIQ_NAME_IDX,
			ibat, false); err != nil {
			bat.Clean(m)
			return err
		}
		catalogCache.InsertColumns(bat)
		bat.Clean(m)
	}
	{ // mo_columns
		part := db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_TABLES_ID}][0]
		cols, err := genColumns(0, catalog.MO_COLUMNS, catalog.MO_CATALOG, catalog.MO_COLUMNS_ID,
			catalog.MO_CATALOG_ID, catalog.MoColumnsTableDefs)
		if err != nil {
			return err
		}
		bat, err := genCreateTableTuple(new(table), "", 0, 0, 0, catalog.MO_COLUMNS, catalog.MO_COLUMNS_ID,
			catalog.MO_CATALOG_ID, catalog.MO_CATALOG, catalog.SystemOrdinaryRel, m)
		if err != nil {
			return err
		}
		ibat, err := genInsertBatch(bat, m)
		if err != nil {
			bat.Clean(m)
			return err
		}
		if err := part.Insert(ctx, MO_PRIMARY_OFF+catalog.MO_TABLES_REL_ID_IDX, ibat, false); err != nil {
			bat.Clean(m)
			return err
		}
		catalogCache.InsertTable(bat)
		bat.Clean(m)
		part = db.partitions[[2]uint64{catalog.MO_CATALOG_ID, catalog.MO_COLUMNS_ID}][0]
		bat = batch.NewWithSize(len(catalog.MoColumnsSchema))
		bat.Attrs = append(bat.Attrs, catalog.MoColumnsSchema...)
		bat.SetZs(len(cols), m)
		for _, col := range cols {
			bat0, err := genCreateColumnTuple(col, m)
			if err != nil {
				return err
			}
			if bat.Vecs[0] == nil {
				for i, vec := range bat0.Vecs {
					bat.Vecs[i] = vector.New(vec.GetType())
				}
			}
			for i, vec := range bat0.Vecs {
				if err := vector.UnionOne(bat.Vecs[i], vec, 0, m); err != nil {
					bat.Clean(m)
					bat0.Clean(m)
					return err
				}
			}
			bat0.Clean(m)
		}
		ibat, err = genInsertBatch(bat, m)
		if err != nil {
			bat.Clean(m)
			return err
		}
		if err := part.Insert(ctx, MO_PRIMARY_OFF+catalog.MO_COLUMNS_ATT_UNIQ_NAME_IDX,
			ibat, false); err != nil {
			bat.Clean(m)
			return err
		}
		catalogCache.InsertColumns(bat)
		bat.Clean(m)
	}
	return nil
}

func (db *DB) getMetaPartitions(name string) Partitions {
	db.Lock()
	parts, ok := db.metaTables[name]
	if !ok { // create a new table
		parts = make(Partitions, len(db.dnMap))
		for i := range parts {
			parts[i] = NewPartition(nil)
		}
		db.metaTables[name] = parts
	}
	db.Unlock()
	return parts
}

func (db *DB) getPartitions(databaseId, tableId uint64) Partitions {
	db.Lock()
	parts, ok := db.partitions[[2]uint64{databaseId, tableId}]
	if !ok { // create a new table
		parts = make(Partitions, len(db.dnMap))
		for i := range parts {
			parts[i] = NewPartition(nil)
		}
		db.partitions[[2]uint64{databaseId, tableId}] = parts
	}
	db.Unlock()
	return parts
}

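// Update synchronizes the partition state of (databaseId, tableId) up to ts for every
// DN store in dnList. Each partition's lock channel is used as a binary semaphore
// (receive to acquire, send to release): if the partition's ts already reaches the
// requested timestamp the lock is released and Update returns early; otherwise
// updatePartition applies the logtail built by genSyncLogTailReq and the partition's
// ts is advanced to ts before the lock is released.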
func (db *DB) Update(ctx context.Context, dnList []DNStore, tbl *table, op client.TxnOperator,
	primaryIdx int, databaseId, tableId uint64, ts timestamp.Timestamp) error {
	db.Lock()
	parts, ok := db.partitions[[2]uint64{databaseId, tableId}]
	if !ok { // create a new table
		parts = make(Partitions, len(db.dnMap))
		for i := range parts {
			parts[i] = NewPartition(nil)
		}
		db.partitions[[2]uint64{databaseId, tableId}] = parts
	}
	db.Unlock()

	for i, dn := range dnList {
		part := parts[db.dnMap[dn.UUID]]

		select {
		case <-part.lock:
			if part.ts.Greater(ts) ||
				part.ts.Equal(ts) {
				part.lock <- struct{}{}
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}

		if err := updatePartition(
			i, primaryIdx, tbl, ts, ctx, op, db, part, dn,
			genSyncLogTailReq(part.ts, ts, databaseId, tableId),
		); err != nil {
			part.lock <- struct{}{}
			return err
		}

		part.ts = ts
		part.lock <- struct{}{}
	}

	return nil
}
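
// A rough usage sketch (hypothetical caller, not taken from this file): before scanning
// a table at snapshot ts, a read path would first sync the per-DN partition state and
// then pick up the partitions; dnList, tbl, op, primaryIdx, databaseId, tableId and ts
// are assumed to be supplied by the surrounding transaction machinery.
//
//	if err := db.Update(ctx, dnList, tbl, op, primaryIdx, databaseId, tableId, ts); err != nil {
//		return err
//	}
//	parts := db.getPartitions(databaseId, tableId)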