github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ccl/backupccl/targets.go

// Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package backupccl

import (
	"context"
	"sort"

	"github.com/cockroachdb/cockroach/pkg/ccl/storageccl"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/errors"
)

type descriptorsMatched struct {
	// all tables that match targets plus their parent databases.
	descs []sqlbase.Descriptor

	// the databases from which all tables were matched (eg a.* or DATABASE a).
	expandedDB []sqlbase.ID

	// explicitly requested DBs (e.g. DATABASE a).
	requestedDBs []*sqlbase.DatabaseDescriptor
}

func (d descriptorsMatched) checkExpansions(coveredDBs []sqlbase.ID) error {
	covered := make(map[sqlbase.ID]bool)
	for _, i := range coveredDBs {
		covered[i] = true
	}
	for _, i := range d.requestedDBs {
		if !covered[i.ID] {
			return errors.Errorf("cannot RESTORE DATABASE from a backup of individual tables (use SHOW BACKUP to determine available tables)")
		}
	}
	for _, i := range d.expandedDB {
		if !covered[i] {
			return errors.Errorf("cannot RESTORE <database>.* from a backup of individual tables (use SHOW BACKUP to determine available tables)")
		}
	}
	return nil
}

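// Sketch of the check above (hypothetical values): coveredDBs comes from the
// backup manifest's list of databases that were backed up in their entirety
// (CompleteDbs, see selectTargets below), so restoring DATABASE a or a.* from
// a backup that only contained individual tables of a is rejected.
//
//	matched := descriptorsMatched{
//		requestedDBs: []*sqlbase.DatabaseDescriptor{aDesc}, // RESTORE DATABASE a
//	}
//	err := matched.checkExpansions(nil) // the backup covered no whole databases
//	// err: "cannot RESTORE DATABASE from a backup of individual tables ..."
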
// descriptorResolver is the helper struct that enables reuse of the
// standard name resolution algorithm.
type descriptorResolver struct {
	descByID map[sqlbase.ID]sqlbase.Descriptor
	// Map: db name -> dbID
	dbsByName map[string]sqlbase.ID
	// Map: dbID -> obj name -> obj ID
	objsByName map[sqlbase.ID]map[string]sqlbase.ID
}

// LookupSchema implements the tree.ObjectNameTargetResolver interface.
func (r *descriptorResolver) LookupSchema(
	_ context.Context, dbName, scName string,
) (bool, tree.SchemaMeta, error) {
	if scName != tree.PublicSchema {
		return false, nil, nil
	}
	if dbID, ok := r.dbsByName[dbName]; ok {
		return true, r.descByID[dbID], nil
	}
	return false, nil, nil
}

// LookupObject implements the tree.ObjectNameExistingResolver interface.
func (r *descriptorResolver) LookupObject(
	_ context.Context, flags tree.ObjectLookupFlags, dbName, scName, obName string,
) (bool, tree.NameResolutionResult, error) {
	if flags.RequireMutable {
		panic("did not expect request for mutable descriptor")
	}
	if scName != tree.PublicSchema {
		return false, nil, nil
	}
	dbID, ok := r.dbsByName[dbName]
	if !ok {
		return false, nil, nil
	}
	if objMap, ok := r.objsByName[dbID]; ok {
		if objID, ok := objMap[obName]; ok {
			return true, r.descByID[objID], nil
		}
	}
	return false, nil, nil
}

// newDescriptorResolver prepares a descriptorResolver for the given
// known set of descriptors.
func newDescriptorResolver(descs []sqlbase.Descriptor) (*descriptorResolver, error) {
	r := &descriptorResolver{
		descByID:   make(map[sqlbase.ID]sqlbase.Descriptor),
		dbsByName:  make(map[string]sqlbase.ID),
		objsByName: make(map[sqlbase.ID]map[string]sqlbase.ID),
	}

	// Iterate to find the databases first. We need that because we also
	// check the ParentID for tables, and all the valid parents must be
	// known before we start to check that.
	for _, desc := range descs {
		if dbDesc := desc.GetDatabase(); dbDesc != nil {
			if _, ok := r.dbsByName[dbDesc.Name]; ok {
				return nil, errors.Errorf("duplicate database name: %q used for ID %d and %d",
					dbDesc.Name, r.dbsByName[dbDesc.Name], dbDesc.ID)
			}
			r.dbsByName[dbDesc.Name] = dbDesc.ID
		}

		// Incidentally, also remember all the descriptors by ID.
		if prevDesc, ok := r.descByID[desc.GetID()]; ok {
			return nil, errors.Errorf("duplicate descriptor ID: %d used by %q and %q",
				desc.GetID(), prevDesc.GetName(), desc.GetName())
		}
		r.descByID[desc.GetID()] = desc
	}
	// Now on to the tables.
	for _, desc := range descs {
		if tbDesc := desc.Table(hlc.Timestamp{}); tbDesc != nil {
			if tbDesc.Dropped() {
				continue
			}
			parentDesc, ok := r.descByID[tbDesc.ParentID]
			if !ok {
				return nil, errors.Errorf("table %q has unknown ParentID %d", tbDesc.Name, tbDesc.ParentID)
			}
			if _, ok := r.dbsByName[parentDesc.GetName()]; !ok {
				return nil, errors.Errorf("table %q's ParentID %d (%q) is not a database",
					tbDesc.Name, tbDesc.ParentID, parentDesc.GetName())
			}
			objMap := r.objsByName[parentDesc.GetID()]
			if objMap == nil {
				objMap = make(map[string]sqlbase.ID)
			}
			if _, ok := objMap[tbDesc.Name]; ok {
				return nil, errors.Errorf("duplicate table name: %q.%q used for ID %d and %d",
					parentDesc.GetName(), tbDesc.Name, tbDesc.ID, objMap[tbDesc.Name])
			}
			objMap[tbDesc.Name] = tbDesc.ID
			r.objsByName[parentDesc.GetID()] = objMap
		}
	}

	return r, nil
}

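// Usage sketch (hedged; the variable names are hypothetical and only
// illustrate the intended flow): descriptorsMatchingTargets below builds one
// of these resolvers from the backup's descriptor list and hands it to the
// standard name-resolution machinery. Only the public schema is recognized.
//
//	r, err := newDescriptorResolver(descs) // fails on duplicate names or IDs
//	if err != nil {
//		return err
//	}
//	dbFound, _, err := r.LookupSchema(ctx, "mydb", tree.PublicSchema)
//	// dbFound is true iff a database named "mydb" exists among descs.
//	tblFound, res, err := r.LookupObject(
//		ctx, tree.ObjectLookupFlags{}, "mydb", tree.PublicSchema, "mytable")
//	// res is the sqlbase.Descriptor for mydb.public.mytable when tblFound.
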
// descriptorsMatchingTargets returns the descriptors that match the targets. A
// database descriptor is included in this set if it matches the targets (or the
// session database) or if one of its tables matches the targets. All expanded
// DBs, via either `foo.*` or `DATABASE foo`, are noted, as are those explicitly
// named as DBs (e.g. with `DATABASE foo`, not `foo.*`). These distinctions are
// used e.g. by RESTORE.
//
// This is guaranteed to not return duplicates.
func descriptorsMatchingTargets(
	ctx context.Context,
	currentDatabase string,
	searchPath sessiondata.SearchPath,
	descriptors []sqlbase.Descriptor,
	targets tree.TargetList,
) (descriptorsMatched, error) {
	// TODO(dan): once CockroachDB supports schemas in addition to
	// catalogs, then this method will need to support it.

	ret := descriptorsMatched{}

	resolver, err := newDescriptorResolver(descriptors)
	if err != nil {
		return ret, err
	}

	alreadyRequestedDBs := make(map[sqlbase.ID]struct{})
	alreadyExpandedDBs := make(map[sqlbase.ID]struct{})
	// Process all the DATABASE requests.
	for _, d := range targets.Databases {
		dbID, ok := resolver.dbsByName[string(d)]
		if !ok {
			return ret, errors.Errorf("unknown database %q", d)
		}
		if _, ok := alreadyRequestedDBs[dbID]; !ok {
			desc := resolver.descByID[dbID]
			ret.descs = append(ret.descs, desc)
			ret.requestedDBs = append(ret.requestedDBs, desc.GetDatabase())
			ret.expandedDB = append(ret.expandedDB, dbID)
			alreadyRequestedDBs[dbID] = struct{}{}
			alreadyExpandedDBs[dbID] = struct{}{}
		}
	}

	// Process all the TABLE requests.
	// Pulling in a table needs to pull in the underlying database too.
	alreadyRequestedTables := make(map[sqlbase.ID]struct{})
	for _, pattern := range targets.Tables {
		var err error
		pattern, err = pattern.NormalizeTablePattern()
		if err != nil {
			return ret, err
		}

		switch p := pattern.(type) {
		case *tree.TableName:
			// TODO: As part of work for #34240, this should not be a TableName.
			// Instead, it should be an UnresolvedObjectName.
			un := p.ToUnresolvedObjectName()
			found, prefix, descI, err := tree.ResolveExisting(ctx, un, resolver, tree.ObjectLookupFlags{}, currentDatabase, searchPath)
			if err != nil {
				return ret, err
			}
			p.ObjectNamePrefix = prefix
			doesNotExistErr := errors.Errorf(`table %q does not exist`, tree.ErrString(p))
			if !found {
				return ret, doesNotExistErr
			}
			desc := descI.(sqlbase.Descriptor)
			tableDesc := desc.Table(hlc.Timestamp{})

			// Verify that the table is in the correct state.
			if err := sqlbase.FilterTableState(tableDesc); err != nil {
				// Return a does-not-exist error if explicitly asking for this table.
				return ret, doesNotExistErr
			}

			// If the parent database is not requested already, request it now.
			parentID := tableDesc.GetParentID()
			if _, ok := alreadyRequestedDBs[parentID]; !ok {
				parentDesc := resolver.descByID[parentID]
				ret.descs = append(ret.descs, parentDesc)
				alreadyRequestedDBs[parentID] = struct{}{}
			}
			// Then request the table itself.
			if _, ok := alreadyRequestedTables[desc.GetID()]; !ok {
				alreadyRequestedTables[desc.GetID()] = struct{}{}
				ret.descs = append(ret.descs, desc)
			}

		case *tree.AllTablesSelector:
			found, descI, err := p.ObjectNamePrefix.Resolve(ctx, resolver, currentDatabase, searchPath)
			if err != nil {
				return ret, err
			}
			if !found {
				return ret, sqlbase.NewInvalidWildcardError(tree.ErrString(p))
			}
			desc := descI.(sqlbase.Descriptor)

			// If the database is not requested already, request it now.
			dbID := desc.GetID()
			if _, ok := alreadyRequestedDBs[dbID]; !ok {
				ret.descs = append(ret.descs, desc)
				alreadyRequestedDBs[dbID] = struct{}{}
			}

			// Then request the expansion.
			if _, ok := alreadyExpandedDBs[desc.GetID()]; !ok {
				ret.expandedDB = append(ret.expandedDB, desc.GetID())
				alreadyExpandedDBs[desc.GetID()] = struct{}{}
			}

		default:
			return ret, errors.Errorf("unknown pattern %T: %+v", pattern, pattern)
		}
	}

	// Then process the database expansions.
	for dbID := range alreadyExpandedDBs {
		for _, tblID := range resolver.objsByName[dbID] {
			desc := resolver.descByID[tblID]
			table := desc.Table(hlc.Timestamp{})
			if err := sqlbase.FilterTableState(table); err != nil {
				// Don't include this table in the expansion since it's not in a valid
				// state. Silently fail since this table was not directly requested,
				// but was just part of an expansion.
				continue
			}
			if _, ok := alreadyRequestedTables[tblID]; !ok {
				ret.descs = append(ret.descs, desc)
			}
		}
	}

	return ret, nil
}

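// Example of the matching behavior described above (a sketch; the database and
// table names are hypothetical, and the targets would normally come from
// parsing a statement such as `BACKUP DATABASE foo, TABLE bar.baz ...`):
//
//	matched, err := descriptorsMatchingTargets(ctx, "defaultdb", searchPath, allDescs, targets)
//	// matched.descs:        foo, bar, bar.baz, plus every valid (non-dropped) table in foo
//	// matched.expandedDB:   [foo's ID]            (DATABASE foo expands to all its tables)
//	// matched.requestedDBs: [foo's db descriptor] (explicitly named databases only)
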
// getRelevantDescChanges finds the changes between start and end time to the
// SQL descriptors matching `descs` or `expandedDBs`, ordered by time. A
// descriptor revision matches if it is an earlier revision of a descriptor in
// descs (same ID) or has parentID in `expanded`. Deleted descriptors are
// represented as nil. Fills in the `priorIDs` map in the process, which maps
// a descriptor to the ID by which it was previously known (e.g. pre-TRUNCATE).
func getRelevantDescChanges(
	ctx context.Context,
	db *kv.DB,
	startTime, endTime hlc.Timestamp,
	descs []sqlbase.Descriptor,
	expanded []sqlbase.ID,
	priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupManifest_DescriptorRevision, error) {

	allChanges, err := getAllDescChanges(ctx, db, startTime, endTime, priorIDs)
	if err != nil {
		return nil, err
	}

	// If no descriptors changed, we can just stop now and have RESTORE use the
	// normal list of descs (i.e. as of endTime).
	if len(allChanges) == 0 {
		return nil, nil
	}

	// interestingChanges will be every descriptor change relevant to the backup.
	var interestingChanges []BackupManifest_DescriptorRevision

	// interestingIDs are the descriptors for which we're interested in capturing
	// changes. This is initially the descriptors matched (as of endTime) by our
	// target spec, plus those that belonged to a DB that our spec expanded at any
	// point in the interval.
	interestingIDs := make(map[sqlbase.ID]struct{}, len(descs))

	// The descriptors that currently (endTime) match the target spec (desc) are
	// obviously interesting to our backup.
	for _, i := range descs {
		interestingIDs[i.GetID()] = struct{}{}
		if t := i.Table(hlc.Timestamp{}); t != nil {
			for j := t.ReplacementOf.ID; j != sqlbase.InvalidID; j = priorIDs[j] {
				interestingIDs[j] = struct{}{}
			}
		}
	}

	// We're also interested in any desc that belonged to a DB we're backing up.
	// We'll start by looking at all descriptors as of the beginning of the
	// interval and add to the set of IDs we are interested in any descriptor
	// that belongs to one of the parents we care about.
	interestingParents := make(map[sqlbase.ID]struct{}, len(expanded))
	for _, i := range expanded {
		interestingParents[i] = struct{}{}
	}

	if !startTime.IsEmpty() {
		starting, err := loadAllDescs(ctx, db, startTime)
		if err != nil {
			return nil, err
		}
		for _, i := range starting {
			if table := i.Table(hlc.Timestamp{}); table != nil {
				// We need to add to interestingIDs so that if we later see a delete for
				// this ID we still know it is interesting to us, even though we will not
				// have a parentID at that point (since the delete is a nil desc).
				if _, ok := interestingParents[table.ParentID]; ok {
					interestingIDs[table.ID] = struct{}{}
				}
			}
			if _, ok := interestingIDs[i.GetID()]; ok {
				desc := i
				// We inject a fake "revision" that captures the starting state for each
				// matched descriptor, to allow restoring to times before its first
				// revision actually inside the window. This likely ends up duplicating
				// the last version in the previous BACKUP descriptor, but avoids adding
				// more complicated special-cases in RESTORE, so it only needs to look in
				// a single BACKUP to restore to a particular time.
				initial := BackupManifest_DescriptorRevision{Time: startTime, ID: i.GetID(), Desc: &desc}
				interestingChanges = append(interestingChanges, initial)
			}
		}
	}

	for _, change := range allChanges {
		// A change to an ID that we are interested in is obviously interesting --
		// a change is also interesting if it is to a table that has a parent we
		// are interested in, and thereafter that table also becomes an ID whose
		// changes interest us (so that, as mentioned above, we can decide whether
		// later deletes are interesting).
		if _, ok := interestingIDs[change.ID]; ok {
			interestingChanges = append(interestingChanges, change)
		} else if change.Desc != nil {
			if table := change.Desc.Table(hlc.Timestamp{}); table != nil {
				if _, ok := interestingParents[table.ParentID]; ok {
					interestingIDs[table.ID] = struct{}{}
					interestingChanges = append(interestingChanges, change)
				}
			}
		}
	}

	sort.Slice(interestingChanges, func(i, j int) bool {
		return interestingChanges[i].Time.Less(interestingChanges[j].Time)
	})

	return interestingChanges, nil
}

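// Sketch of how this is consumed for a revision-history backup (hypothetical
// call site; a caller would wire in the matched targets, with the previous
// backup's end time as startTime for an incremental backup):
//
//	priorIDs := make(map[sqlbase.ID]sqlbase.ID)
//	revs, err := getRelevantDescChanges(
//		ctx, db, startTime, endTime, matched.descs, matched.expandedDB, priorIDs)
//	// revs is sorted by Time; a revision with Desc == nil records that the
//	// descriptor was deleted at that timestamp, and an injected revision at
//	// startTime captures each matched descriptor's starting state.
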
// getAllDescChanges gets every sql descriptor change between start and end
// time, returning its ID, content and the change time (with deletions
// represented as nil content).
func getAllDescChanges(
	ctx context.Context,
	db *kv.DB,
	startTime, endTime hlc.Timestamp,
	priorIDs map[sqlbase.ID]sqlbase.ID,
) ([]BackupManifest_DescriptorRevision, error) {
	startKey := keys.TODOSQLCodec.TablePrefix(keys.DescriptorTableID)
	endKey := startKey.PrefixEnd()

	allRevs, err := storageccl.GetAllRevisions(ctx, db, startKey, endKey, startTime, endTime)
	if err != nil {
		return nil, err
	}

	var res []BackupManifest_DescriptorRevision

	for _, revs := range allRevs {
		id, err := keys.TODOSQLCodec.DecodeDescMetadataID(revs.Key)
		if err != nil {
			return nil, err
		}
		for _, rev := range revs.Values {
			r := BackupManifest_DescriptorRevision{ID: sqlbase.ID(id), Time: rev.Timestamp}
			if len(rev.RawBytes) != 0 {
				var desc sqlbase.Descriptor
				if err := rev.GetProto(&desc); err != nil {
					return nil, err
				}
				r.Desc = &desc
				t := desc.Table(rev.Timestamp)
				if t != nil && t.ReplacementOf.ID != sqlbase.InvalidID {
					priorIDs[t.ID] = t.ReplacementOf.ID
				}
			}
			res = append(res, r)
		}
	}
	return res, nil
}

func allSQLDescriptors(ctx context.Context, txn *kv.Txn) ([]sqlbase.Descriptor, error) {
	startKey := keys.TODOSQLCodec.TablePrefix(keys.DescriptorTableID)
	endKey := startKey.PrefixEnd()
	rows, err := txn.Scan(ctx, startKey, endKey, 0)
	if err != nil {
		return nil, err
	}

	sqlDescs := make([]sqlbase.Descriptor, len(rows))
	for i, row := range rows {
		if err := row.ValueProto(&sqlDescs[i]); err != nil {
			return nil, errors.NewAssertionErrorWithWrappedErrf(err,
				"%s: unable to unmarshal SQL descriptor", row.Key)
		}
		if row.Value != nil {
			sqlDescs[i].Table(row.Value.Timestamp)
		}
	}
	return sqlDescs, nil
}

func ensureInterleavesIncluded(tables []*sqlbase.TableDescriptor) error {
	inBackup := make(map[sqlbase.ID]bool, len(tables))
	for _, t := range tables {
		inBackup[t.ID] = true
	}

	for _, table := range tables {
		if err := table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error {
			for _, a := range index.Interleave.Ancestors {
				if !inBackup[a.TableID] {
					return errors.Errorf(
						"cannot backup table %q without interleave parent (ID %d)", table.Name, a.TableID,
					)
				}
			}
			for _, c := range index.InterleavedBy {
				if !inBackup[c.Table] {
					return errors.Errorf(
						"cannot backup table %q without interleave child table (ID %d)", table.Name, c.Table,
					)
				}
			}
			return nil
		}); err != nil {
			return err
		}
	}
	return nil
}

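// Sketch (hypothetical table set): both directions of an interleave
// relationship must be present in the same backup, so backing up only the
// parent of an interleaved child, or only the child, fails here.
//
//	tables := []*sqlbase.TableDescriptor{parentTbl} // interleaved child omitted
//	err := ensureInterleavesIncluded(tables)
//	// err: cannot backup table "parent" without interleave child table (ID ...)
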
func loadAllDescs(
	ctx context.Context, db *kv.DB, asOf hlc.Timestamp,
) ([]sqlbase.Descriptor, error) {
	var allDescs []sqlbase.Descriptor
	if err := db.Txn(
		ctx,
		func(ctx context.Context, txn *kv.Txn) error {
			var err error
			txn.SetFixedTimestamp(ctx, asOf)
			allDescs, err = allSQLDescriptors(ctx, txn)
			return err
		}); err != nil {
		return nil, err
	}
	return allDescs, nil
}

// ResolveTargetsToDescriptors performs name resolution on a set of targets and
// returns the resulting descriptors.
func ResolveTargetsToDescriptors(
	ctx context.Context,
	p sql.PlanHookState,
	endTime hlc.Timestamp,
	targets tree.TargetList,
	descriptorCoverage tree.DescriptorCoverage,
) ([]sqlbase.Descriptor, []sqlbase.ID, error) {
	allDescs, err := loadAllDescs(ctx, p.ExecCfg().DB, endTime)
	if err != nil {
		return nil, nil, err
	}

	if descriptorCoverage == tree.AllDescriptors {
		return fullClusterTargetsBackup(allDescs)
	}

	var matched descriptorsMatched
	if matched, err = descriptorsMatchingTargets(ctx,
		p.CurrentDatabase(), p.CurrentSearchPath(), allDescs, targets); err != nil {
		return nil, nil, err
	}

	// Ensure interleaved tables appear after their parent. Since parents must be
	// created before their children, simply sorting by ID accomplishes this.
	sort.Slice(matched.descs, func(i, j int) bool { return matched.descs[i].GetID() < matched.descs[j].GetID() })
	return matched.descs, matched.expandedDB, nil
}

// fullClusterTargetsBackup returns the same descriptors referenced in
// fullClusterTargets, but rather than returning the entire database
// descriptors as the second return value, it only returns their IDs.
func fullClusterTargetsBackup(
	allDescs []sqlbase.Descriptor,
) ([]sqlbase.Descriptor, []sqlbase.ID, error) {
	fullClusterDescs, fullClusterDBs, err := fullClusterTargets(allDescs)
	if err != nil {
		return nil, nil, err
	}

	fullClusterDBIDs := make([]sqlbase.ID, 0)
	for _, desc := range fullClusterDBs {
		fullClusterDBIDs = append(fullClusterDBIDs, desc.GetID())
	}
	return fullClusterDescs, fullClusterDBIDs, nil
}

// fullClusterTargets returns all of the tableDescriptors to be included in a
// full cluster backup, and all the user databases.
func fullClusterTargets(
	allDescs []sqlbase.Descriptor,
) ([]sqlbase.Descriptor, []*sqlbase.DatabaseDescriptor, error) {
	fullClusterDescs := make([]sqlbase.Descriptor, 0, len(allDescs))
	fullClusterDBs := make([]*sqlbase.DatabaseDescriptor, 0)

	systemTablesToBackup := make(map[string]struct{}, len(fullClusterSystemTables))
	for _, tableName := range fullClusterSystemTables {
		systemTablesToBackup[tableName] = struct{}{}
	}

	for _, desc := range allDescs {
		if dbDesc := desc.GetDatabase(); dbDesc != nil {
			fullClusterDescs = append(fullClusterDescs, desc)
			if dbDesc.ID != sqlbase.SystemDB.ID {
				// The only database that isn't being fully backed up is the system DB.
				fullClusterDBs = append(fullClusterDBs, dbDesc)
			}
		}
		if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
			if tableDesc.ParentID == sqlbase.SystemDB.ID {
				// Add only the system tables that we plan to include in a full cluster
				// backup.
				if _, ok := systemTablesToBackup[tableDesc.Name]; ok {
					fullClusterDescs = append(fullClusterDescs, desc)
				}
			} else {
				// Add all user tables that are not in a DROP state.
				if tableDesc.State != sqlbase.TableDescriptor_DROP {
					fullClusterDescs = append(fullClusterDescs, desc)
				}
			}
		}
	}
	return fullClusterDescs, fullClusterDBs, nil
}

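// Sketch of what a full cluster backup selects (based on the logic above; the
// cluster contents are hypothetical): every database descriptor, the system
// tables listed in fullClusterSystemTables, and all non-dropped user tables.
//
//	descs, dbs, err := fullClusterTargets(allDescs)
//	// descs: system DB + user DBs + selected system tables + user tables
//	// dbs:   only the user databases (the system DB is excluded)
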
func lookupDatabaseID(
	ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, name string,
) (sqlbase.ID, error) {
	found, id, err := sqlbase.LookupDatabaseID(ctx, txn, codec, name)
	if err != nil {
		return sqlbase.InvalidID, err
	}
	if !found {
		return sqlbase.InvalidID, errors.Errorf("could not find ID for database %s", name)
	}
	return id, nil
}

// CheckTableExists returns an error if a table already exists with given
// parent and name.
func CheckTableExists(
	ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, parentID sqlbase.ID, name string,
) error {
	found, _, err := sqlbase.LookupPublicTableID(ctx, txn, codec, parentID, name)
	if err != nil {
		return err
	}
	if found {
		return sqlbase.NewRelationAlreadyExistsError(name)
	}
	return nil
}

func fullClusterTargetsRestore(
	allDescs []sqlbase.Descriptor,
) ([]sqlbase.Descriptor, []*sqlbase.DatabaseDescriptor, error) {
	fullClusterDescs, fullClusterDBs, err := fullClusterTargets(allDescs)
	if err != nil {
		return nil, nil, err
	}
	filteredDescs := make([]sqlbase.Descriptor, 0, len(fullClusterDescs))
	for _, desc := range fullClusterDescs {
		if _, isDefaultDB := sqlbase.DefaultUserDBs[desc.GetName()]; !isDefaultDB && desc.GetID() != sqlbase.SystemDB.ID {
			filteredDescs = append(filteredDescs, desc)
		}
	}
	filteredDBs := make([]*sqlbase.DatabaseDescriptor, 0, len(fullClusterDBs))
	for _, db := range fullClusterDBs {
		if _, isDefaultDB := sqlbase.DefaultUserDBs[db.GetName()]; !isDefaultDB && db.GetID() != sqlbase.SystemDB.ID {
			filteredDBs = append(filteredDBs, db)
		}
	}

	return filteredDescs, filteredDBs, nil
}

func selectTargets(
	ctx context.Context,
	p sql.PlanHookState,
	backupManifests []BackupManifest,
	targets tree.TargetList,
	descriptorCoverage tree.DescriptorCoverage,
	asOf hlc.Timestamp,
) ([]sqlbase.Descriptor, []*sqlbase.DatabaseDescriptor, error) {
	allDescs, lastBackupManifest := loadSQLDescsFromBackupsAtTime(backupManifests, asOf)

	if descriptorCoverage == tree.AllDescriptors {
		return fullClusterTargetsRestore(allDescs)
	}

	matched, err := descriptorsMatchingTargets(ctx,
		p.CurrentDatabase(), p.CurrentSearchPath(), allDescs, targets)
	if err != nil {
		return nil, nil, err
	}

	if len(matched.descs) == 0 {
		return nil, nil, errors.Errorf("no tables or databases matched the given targets: %s", tree.ErrString(&targets))
	}

	if lastBackupManifest.FormatVersion >= BackupFormatDescriptorTrackingVersion {
		if err := matched.checkExpansions(lastBackupManifest.CompleteDbs); err != nil {
			return nil, nil, err
		}
	}

	return matched.descs, matched.requestedDBs, nil
}
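
// End-to-end sketch of RESTORE target selection (hypothetical caller; the
// manifests, targets, and coverage come from the RESTORE statement being
// planned):
//
//	descs, restoreDBs, err := selectTargets(
//		ctx, p, backupManifests, targets, descriptorCoverage, asOf)
//	// 1. Descriptors are loaded from the backup manifests as of `asOf`.
//	// 2. For a full cluster restore, fullClusterTargetsRestore drops the
//	//    system DB and the default user databases from the full-cluster set.
//	// 3. Otherwise the targets are matched, and (for backup formats that
//	//    record CompleteDbs) checkExpansions verifies that any requested or
//	//    expanded database was completely covered by the backup.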