github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ccl/backupccl/restore_planning.go

     1  // Copyright 2016 The Cockroach Authors.
     2  //
     3  // Licensed as a CockroachDB Enterprise file under the Cockroach Community
     4  // License (the "License"); you may not use this file except in compliance with
     5  // the License. You may obtain a copy of the License at
     6  //
     7  //     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
     8  
     9  package backupccl
    10  
    11  import (
    12  	"context"
    13  	"sort"
    14  
    15  	"github.com/cockroachdb/cockroach/pkg/ccl/storageccl"
    16  	"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
    17  	"github.com/cockroachdb/cockroach/pkg/jobs"
    18  	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
    19  	"github.com/cockroachdb/cockroach/pkg/keys"
    20  	"github.com/cockroachdb/cockroach/pkg/kv"
    21  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    22  	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
    23  	"github.com/cockroachdb/cockroach/pkg/sql"
    24  	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
    25  	"github.com/cockroachdb/cockroach/pkg/sql/covering"
    26  	"github.com/cockroachdb/cockroach/pkg/sql/parser"
    27  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
    28  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
    29  	"github.com/cockroachdb/cockroach/pkg/sql/privilege"
    30  	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
    31  	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    32  	"github.com/cockroachdb/cockroach/pkg/sql/types"
    33  	"github.com/cockroachdb/cockroach/pkg/storage/cloud"
    34  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    35  	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
    36  	"github.com/cockroachdb/cockroach/pkg/util/tracing"
    37  	"github.com/cockroachdb/errors"
    38  )
    39  
    40  // TableRewriteMap maps old table IDs to new table and parent IDs.
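         //
         // For illustration only (a sketch, not code from this file): an entry pairs an
         // old descriptor ID with the IDs it is rewritten to, e.g.
         //
         //	rewrites := make(TableRewriteMap)
         //	rewrites[oldID] = &jobspb.RestoreDetails_TableRewrite{
         //		TableID:  newID,       // ID the descriptor gets in the restoring cluster
         //		ParentID: newParentID, // database the descriptor is restored under
         //	}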
    41  type TableRewriteMap map[sqlbase.ID]*jobspb.RestoreDetails_TableRewrite
    42  
    43  const (
    44  	restoreOptIntoDB               = "into_db"
    45  	restoreOptSkipMissingFKs       = "skip_missing_foreign_keys"
    46  	restoreOptSkipMissingSequences = "skip_missing_sequences"
    47  	restoreOptSkipMissingViews     = "skip_missing_views"
    48  
     49  	// The temporary database that system tables will be restored into for
     50  	// full cluster backups.
    51  	restoreTempSystemDB = "crdb_temp_system"
    52  )
    53  
    54  var restoreOptionExpectValues = map[string]sql.KVStringOptValidate{
    55  	restoreOptIntoDB:               sql.KVStringOptRequireValue,
    56  	restoreOptSkipMissingFKs:       sql.KVStringOptRequireNoValue,
    57  	restoreOptSkipMissingSequences: sql.KVStringOptRequireNoValue,
    58  	restoreOptSkipMissingViews:     sql.KVStringOptRequireNoValue,
    59  	backupOptEncPassphrase:         sql.KVStringOptRequireValue,
    60  }
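         
         // For illustration only (not from this file; exact syntax may differ by
         // version): RESTORE statements exercise these options roughly like
         //
         //	RESTORE DATABASE bank FROM 'nodelocal://1/backups';
         //	RESTORE bank.customers FROM 'nodelocal://1/backups'
         //		WITH into_db = 'bank2', skip_missing_foreign_keys;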
    61  
    62  // rewriteViewQueryDBNames rewrites the passed table's ViewQuery replacing all
    63  // non-empty db qualifiers with `newDB`.
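         //
         // For example (illustrative): with newDB = "restored", a view query such as
         //
         //	SELECT k FROM olddb.public.t
         //
         // becomes
         //
         //	SELECT k FROM restored.public.t
         //
         // while names with an empty catalog, like "".information_schema.tables, keep
         // their empty catalog.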
    64  //
    65  // TODO: this AST traversal misses tables named in strings (#24556).
    66  func rewriteViewQueryDBNames(table *sqlbase.TableDescriptor, newDB string) error {
    67  	stmt, err := parser.ParseOne(table.ViewQuery)
    68  	if err != nil {
    69  		return pgerror.Wrapf(err, pgcode.Syntax,
    70  			"failed to parse underlying query from view %q", table.Name)
    71  	}
    72  	// Re-format to change all DB names to `newDB`.
    73  	f := tree.NewFmtCtx(tree.FmtParsable)
    74  	f.SetReformatTableNames(func(ctx *tree.FmtCtx, tn *tree.TableName) {
     75  		// An empty catalog, e.g. "".information_schema.tables, should stay empty.
    76  		if tn.CatalogName != "" {
    77  			tn.CatalogName = tree.Name(newDB)
    78  		}
    79  		ctx.WithReformatTableNames(nil, func() {
    80  			ctx.FormatNode(tn)
    81  		})
    82  	})
    83  	f.FormatNode(stmt.AST)
    84  	table.ViewQuery = f.CloseAndGetString()
    85  	return nil
    86  }
    87  
    88  // maybeFilterMissingViews filters the set of tables to restore to exclude views
    89  // whose dependencies are either missing or are themselves unrestorable due to
    90  // missing dependencies, and returns the resulting set of tables. If the
    91  // restoreOptSkipMissingViews option is not set, an error is returned if any
    92  // unrestorable views are found.
    93  func maybeFilterMissingViews(
    94  	tablesByID map[sqlbase.ID]*sqlbase.TableDescriptor, opts map[string]string,
    95  ) (map[sqlbase.ID]*sqlbase.TableDescriptor, error) {
    96  	// Function that recursively determines whether a given table, if it is a
    97  	// view, has valid dependencies. Dependencies are looked up in tablesByID.
    98  	var hasValidViewDependencies func(*sqlbase.TableDescriptor) bool
    99  	hasValidViewDependencies = func(desc *sqlbase.TableDescriptor) bool {
   100  		if !desc.IsView() {
   101  			return true
   102  		}
   103  		for _, id := range desc.DependsOn {
   104  			if desc, ok := tablesByID[id]; !ok || !hasValidViewDependencies(desc) {
   105  				return false
   106  			}
   107  		}
   108  		return true
   109  	}
   110  
   111  	filteredTablesByID := make(map[sqlbase.ID]*sqlbase.TableDescriptor)
   112  	for id, table := range tablesByID {
   113  		if hasValidViewDependencies(table) {
   114  			filteredTablesByID[id] = table
   115  		} else {
   116  			if _, ok := opts[restoreOptSkipMissingViews]; !ok {
   117  				return nil, errors.Errorf(
   118  					"cannot restore view %q without restoring referenced table (or %q option)",
   119  					table.Name, restoreOptSkipMissingViews,
   120  				)
   121  			}
   122  		}
   123  	}
   124  	return filteredTablesByID, nil
   125  }
   126  
   127  // allocateTableRewrites determines the new ID and parentID (a "TableRewrite")
   128  // for each table in sqlDescs and returns a mapping from old ID to said
   129  // TableRewrite. It first validates that the provided sqlDescs can be restored
    130  // into their original database (or the database specified in opts) so that
    131  // table IDs are not leaked when the restore is certain to fail.
   132  func allocateTableRewrites(
   133  	ctx context.Context,
   134  	p sql.PlanHookState,
   135  	databasesByID map[sqlbase.ID]*sql.DatabaseDescriptor,
   136  	tablesByID map[sqlbase.ID]*sql.TableDescriptor,
   137  	restoreDBs []*sqlbase.DatabaseDescriptor,
   138  	descriptorCoverage tree.DescriptorCoverage,
   139  	opts map[string]string,
   140  ) (TableRewriteMap, error) {
   141  	tableRewrites := make(TableRewriteMap)
   142  	overrideDB, renaming := opts[restoreOptIntoDB]
   143  
   144  	restoreDBNames := make(map[string]*sqlbase.DatabaseDescriptor, len(restoreDBs))
   145  	for _, db := range restoreDBs {
   146  		restoreDBNames[db.Name] = db
   147  	}
   148  
   149  	if len(restoreDBNames) > 0 && renaming {
   150  		return nil, errors.Errorf("cannot use %q option when restoring database(s)", restoreOptIntoDB)
   151  	}
   152  
   153  	// The logic at the end of this function leaks table IDs, so fail fast if
   154  	// we can be certain the restore will fail.
   155  
   156  	// Fail fast if the tables to restore are incompatible with the specified
   157  	// options.
   158  	maxDescIDInBackup := int64(keys.MinNonPredefinedUserDescID)
   159  	for _, table := range tablesByID {
   160  		if int64(table.ID) > maxDescIDInBackup {
   161  			maxDescIDInBackup = int64(table.ID)
   162  		}
   163  		// Check that foreign key targets exist.
   164  		for i := range table.OutboundFKs {
   165  			fk := &table.OutboundFKs[i]
   166  			if _, ok := tablesByID[fk.ReferencedTableID]; !ok {
   167  				if _, ok := opts[restoreOptSkipMissingFKs]; !ok {
   168  					return nil, errors.Errorf(
   169  						"cannot restore table %q without referenced table %d (or %q option)",
   170  						table.Name, fk.ReferencedTableID, restoreOptSkipMissingFKs,
   171  					)
   172  				}
   173  			}
   174  		}
   175  
   176  		// Check that referenced sequences exist.
   177  		for i := range table.Columns {
   178  			col := &table.Columns[i]
   179  			for _, seqID := range col.UsesSequenceIds {
   180  				if _, ok := tablesByID[seqID]; !ok {
   181  					if _, ok := opts[restoreOptSkipMissingSequences]; !ok {
   182  						return nil, errors.Errorf(
   183  							"cannot restore table %q without referenced sequence %d (or %q option)",
   184  							table.Name, seqID, restoreOptSkipMissingSequences,
   185  						)
   186  					}
   187  				}
   188  			}
   189  		}
   190  	}
   191  
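         	// needsNewParentIDs maps the name of each database being restored (via
         	// RESTORE DATABASE) to the IDs of the tables that must be re-parented under
         	// that database's newly allocated ID (assigned further below).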
   192  	needsNewParentIDs := make(map[string][]sqlbase.ID)
   193  
   194  	// Increment the DescIDSequenceKey so that it is higher than the max desc ID
    195  	// in the backup. This generator produces the next descriptor ID to allocate.
   196  	var tempSysDBID sqlbase.ID
   197  	if descriptorCoverage == tree.AllDescriptors {
   198  		var err error
   199  		// Restore the key which generates descriptor IDs.
   200  		if err = p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
   201  			b := txn.NewBatch()
   202  			// N.B. This key is usually mutated using the Inc command. That
    203  			// command warns that if the key is ever Put directly, Inc will
   204  			// return an error. This is only to ensure that the type of the key
   205  			// doesn't change. Here we just need to be very careful that we only
   206  			// write int64 values.
   207  			// The generator's value should be set to the value of the next ID
   208  			// to generate.
   209  			b.Put(p.ExecCfg().Codec.DescIDSequenceKey(), maxDescIDInBackup+1)
   210  			return txn.Run(ctx, b)
   211  		}); err != nil {
   212  			return nil, err
   213  		}
   214  		tempSysDBID, err = catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
   215  		if err != nil {
   216  			return nil, err
   217  		}
   218  		tableRewrites[tempSysDBID] = &jobspb.RestoreDetails_TableRewrite{TableID: tempSysDBID}
   219  	}
   220  
   221  	// Fail fast if the necessary databases don't exist or are otherwise
   222  	// incompatible with this restore.
   223  	if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
   224  		// Check that any DBs being restored do _not_ exist.
   225  		for name := range restoreDBNames {
   226  			found, _, err := sqlbase.LookupDatabaseID(ctx, txn, p.ExecCfg().Codec, name)
   227  			if err != nil {
   228  				return err
   229  			}
   230  			if found {
   231  				return errors.Errorf("database %q already exists", name)
   232  			}
   233  		}
   234  
   235  		for _, table := range tablesByID {
   236  			var targetDB string
   237  			if renaming {
   238  				targetDB = overrideDB
   239  			} else if descriptorCoverage == tree.AllDescriptors && table.ParentID < sqlbase.MaxDefaultDescriptorID {
   240  				// This is a table that is in a database that already existed at
   241  				// cluster creation time.
   242  				defaultDBID, err := lookupDatabaseID(ctx, txn, p.ExecCfg().Codec, sqlbase.DefaultDatabaseName)
   243  				if err != nil {
   244  					return err
   245  				}
   246  				postgresDBID, err := lookupDatabaseID(ctx, txn, p.ExecCfg().Codec, sqlbase.PgDatabaseName)
   247  				if err != nil {
   248  					return err
   249  				}
   250  
   251  				if table.ParentID == sqlbase.SystemDB.ID {
   252  					// For full cluster backups, put the system tables in the temporary
    253  					// system database.
   254  					targetDB = restoreTempSystemDB
   255  					tableRewrites[table.ID] = &jobspb.RestoreDetails_TableRewrite{ParentID: tempSysDBID}
   256  				} else if table.ParentID == defaultDBID {
   257  					targetDB = sqlbase.DefaultDatabaseName
   258  				} else if table.ParentID == postgresDBID {
   259  					targetDB = sqlbase.PgDatabaseName
   260  				}
   261  			} else {
   262  				database, ok := databasesByID[table.ParentID]
   263  				if !ok {
   264  					return errors.Errorf("no database with ID %d in backup for table %q",
   265  						table.ParentID, table.Name)
   266  				}
   267  				targetDB = database.Name
   268  			}
   269  
   270  			if _, ok := restoreDBNames[targetDB]; ok {
   271  				needsNewParentIDs[targetDB] = append(needsNewParentIDs[targetDB], table.ID)
   272  			} else if descriptorCoverage == tree.AllDescriptors {
   273  				// Set the remapped ID to the original parent ID, except for system tables which
   274  				// should be RESTOREd to the temporary system database.
   275  				if targetDB != restoreTempSystemDB {
   276  					tableRewrites[table.ID] = &jobspb.RestoreDetails_TableRewrite{ParentID: table.ParentID}
   277  				}
   278  			} else {
   279  				var parentID sqlbase.ID
   280  				{
   281  					found, newParentID, err := sqlbase.LookupDatabaseID(ctx, txn, p.ExecCfg().Codec, targetDB)
   282  					if err != nil {
   283  						return err
   284  					}
   285  					if !found {
   286  						return errors.Errorf("a database named %q needs to exist to restore table %q",
   287  							targetDB, table.Name)
   288  					}
   289  					parentID = newParentID
   290  				}
   291  				// Check that the table name is _not_ in use.
   292  				// This would fail the CPut later anyway, but this yields a prettier error.
   293  				if err := CheckTableExists(ctx, txn, p.ExecCfg().Codec, parentID, table.Name); err != nil {
   294  					return err
   295  				}
   296  
   297  				// Check privileges.
   298  				{
   299  					parentDB, err := sqlbase.GetDatabaseDescFromID(ctx, txn, p.ExecCfg().Codec, parentID)
   300  					if err != nil {
   301  						return errors.Wrapf(err,
   302  							"failed to lookup parent DB %d", errors.Safe(parentID))
   303  					}
   304  
   305  					if err := p.CheckPrivilege(ctx, parentDB, privilege.CREATE); err != nil {
   306  						return err
   307  					}
   308  				}
   309  				// Create the table rewrite with the new parent ID. We've done all the
   310  				// up-front validation that we can.
   311  				tableRewrites[table.ID] = &jobspb.RestoreDetails_TableRewrite{ParentID: parentID}
   312  			}
   313  		}
   314  		return nil
   315  	}); err != nil {
   316  		return nil, err
   317  	}
   318  
   319  	// Allocate new IDs for each database and table.
   320  	//
   321  	// NB: we do this in a standalone transaction, not one that covers the
   322  	// entire restore since restarts would be terrible (and our bulk import
    323  	// primitives are non-transactional), but this does mean if something fails
   324  	// during restore we've "leaked" the IDs, in that the generator will have
   325  	// been incremented.
   326  	//
   327  	// NB: The ordering of the new IDs must be the same as the old ones,
   328  	// otherwise the keys may sort differently after they're rekeyed. We could
   329  	// handle this by chunking the AddSSTable calls more finely in Import, but
   330  	// it would be a big performance hit.
   331  
   332  	for _, db := range restoreDBs {
   333  		var newID sqlbase.ID
   334  		var err error
   335  		if descriptorCoverage == tree.AllDescriptors {
   336  			newID = db.ID
   337  		} else {
   338  			newID, err = catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
   339  			if err != nil {
   340  				return nil, err
   341  			}
   342  		}
   343  
   344  		tableRewrites[db.ID] = &jobspb.RestoreDetails_TableRewrite{TableID: newID}
   345  		for _, tableID := range needsNewParentIDs[db.Name] {
   346  			tableRewrites[tableID] = &jobspb.RestoreDetails_TableRewrite{ParentID: newID}
   347  		}
   348  	}
   349  
   350  	// tablesToRemap usually contains all tables that are being restored. In a
   351  	// full cluster restore this should only include the system tables that need
    352  	// to be remapped into the temporary system database. All other tables in a
    353  	// full cluster backup keep the same ID they had in the backup.
   354  	tablesToRemap := make([]*sqlbase.TableDescriptor, 0, len(tablesByID))
   355  	for _, table := range tablesByID {
   356  		if descriptorCoverage == tree.AllDescriptors {
   357  			if table.ParentID == sqlbase.SystemDB.ID {
   358  				// This is a system table that should be marked for descriptor creation.
   359  				tablesToRemap = append(tablesToRemap, table)
   360  			} else {
   361  				// This table does not need to be remapped.
   362  				tableRewrites[table.ID].TableID = table.ID
   363  			}
   364  		} else {
   365  			tablesToRemap = append(tablesToRemap, table)
   366  		}
   367  	}
   368  	sort.Sort(sqlbase.TableDescriptors(tablesToRemap))
   369  
   370  	// Generate new IDs for the tables that need to be remapped.
   371  	for _, table := range tablesToRemap {
   372  		newTableID, err := catalogkv.GenerateUniqueDescID(ctx, p.ExecCfg().DB, p.ExecCfg().Codec)
   373  		if err != nil {
   374  			return nil, err
   375  		}
   376  		tableRewrites[table.ID].TableID = newTableID
   377  	}
   378  
   379  	return tableRewrites, nil
   380  }
   381  
   382  // maybeUpgradeTableDescsInBackupManifests updates the backup descriptors'
   383  // table descriptors to use the newer 19.2-style foreign key representation,
   384  // if they are not already upgraded. This requires resolving cross-table FK
   385  // references, which is done by looking up all table descriptors across all
    386  // backup descriptors provided. If skipFKsWithNoMatchingTable is set, FKs whose
   387  // "other" table is missing from the set provided are omitted during the
   388  // upgrade, instead of causing an error to be returned.
   389  func maybeUpgradeTableDescsInBackupManifests(
   390  	ctx context.Context,
   391  	backupManifests []BackupManifest,
   392  	codec keys.SQLCodec,
   393  	skipFKsWithNoMatchingTable bool,
   394  ) error {
   395  	protoGetter := sqlbase.MapProtoGetter{
   396  		Protos: make(map[interface{}]protoutil.Message),
   397  	}
   398  	// Populate the protoGetter with all table descriptors in all backup
   399  	// descriptors so that they can be looked up.
   400  	for _, backupManifest := range backupManifests {
   401  		for _, desc := range backupManifest.Descriptors {
   402  			if table := desc.Table(hlc.Timestamp{}); table != nil {
   403  				protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(codec, table.ID))] =
   404  					sqlbase.WrapDescriptor(protoutil.Clone(table).(*sqlbase.TableDescriptor))
   405  			}
   406  		}
   407  	}
   408  
   409  	for i := range backupManifests {
   410  		backupManifest := &backupManifests[i]
   411  		for j := range backupManifest.Descriptors {
   412  			if table := backupManifest.Descriptors[j].Table(hlc.Timestamp{}); table != nil {
   413  				if _, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, codec, skipFKsWithNoMatchingTable); err != nil {
   414  					return err
   415  				}
   416  				// TODO(lucy): Is this necessary?
   417  				backupManifest.Descriptors[j] = *sqlbase.WrapDescriptor(table)
   418  			}
   419  		}
   420  	}
   421  	return nil
   422  }
   423  
   424  // RewriteTableDescs mutates tables to match the ID and privilege specified
   425  // in tableRewrites, as well as adjusting cross-table references to use the
   426  // new IDs. overrideDB can be specified to set database names in views.
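         //
         // For illustration (mirroring the call in doRestorePlan below): the rewrites
         // produced by allocateTableRewrites are applied in place before the restore
         // job is created, e.g.
         //
         //	if err := RewriteTableDescs(tables, tableRewrites, opts[restoreOptIntoDB]); err != nil {
         //		return err
         //	}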
   427  func RewriteTableDescs(
   428  	tables []*sqlbase.TableDescriptor, tableRewrites TableRewriteMap, overrideDB string,
   429  ) error {
   430  	for _, table := range tables {
   431  		tableRewrite, ok := tableRewrites[table.ID]
   432  		if !ok {
   433  			return errors.Errorf("missing table rewrite for table %d", table.ID)
   434  		}
   435  		if table.IsView() && overrideDB != "" {
    436  			// RESTORE checks that all dependencies are also being restored, but if
   437  			// the restore is overriding the destination database, qualifiers in the
   438  			// view query string may be wrong. Since the destination override is
   439  			// applied to everything being restored, anything the view query
   440  			// references will be in the override DB post-restore, so all database
   441  			// qualifiers in the view query should be replaced with overrideDB.
   442  			if err := rewriteViewQueryDBNames(table, overrideDB); err != nil {
   443  				return err
   444  			}
   445  		}
   446  
   447  		table.ID = tableRewrite.TableID
   448  		table.ParentID = tableRewrite.ParentID
   449  
   450  		if err := table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error {
   451  			// Verify that for any interleaved index being restored, the interleave
   452  			// parent is also being restored. Otherwise, the interleave entries in the
   453  			// restored IndexDescriptors won't have anything to point to.
   454  			// TODO(dan): It seems like this restriction could be lifted by restoring
   455  			// stub TableDescriptors for the missing interleave parents.
   456  			for j, a := range index.Interleave.Ancestors {
   457  				ancestorRewrite, ok := tableRewrites[a.TableID]
   458  				if !ok {
   459  					return errors.Errorf(
   460  						"cannot restore table %q without interleave parent %d", table.Name, a.TableID,
   461  					)
   462  				}
   463  				index.Interleave.Ancestors[j].TableID = ancestorRewrite.TableID
   464  			}
   465  			for j, c := range index.InterleavedBy {
   466  				childRewrite, ok := tableRewrites[c.Table]
   467  				if !ok {
   468  					return errors.Errorf(
   469  						"cannot restore table %q without interleave child table %d", table.Name, c.Table,
   470  					)
   471  				}
   472  				index.InterleavedBy[j].Table = childRewrite.TableID
   473  			}
   474  			return nil
   475  		}); err != nil {
   476  			return err
   477  		}
   478  
   479  		// TODO(lucy): deal with outbound foreign key mutations here as well.
   480  		origFKs := table.OutboundFKs
   481  		table.OutboundFKs = nil
   482  		for i := range origFKs {
   483  			fk := &origFKs[i]
   484  			to := fk.ReferencedTableID
   485  			if indexRewrite, ok := tableRewrites[to]; ok {
   486  				fk.ReferencedTableID = indexRewrite.TableID
   487  				fk.OriginTableID = tableRewrite.TableID
   488  			} else {
    489  				// If indexRewrite doesn't exist, the user has specified
    490  				// restoreOptSkipMissingFKs; the case where they haven't was already
    491  				// rejected in allocateTableRewrites.
   492  				continue
   493  			}
   494  
   495  			// TODO(dt): if there is an existing (i.e. non-restoring) table with
   496  			// a db and name matching the one the FK pointed to at backup, should
   497  			// we update the FK to point to it?
   498  			table.OutboundFKs = append(table.OutboundFKs, *fk)
   499  		}
   500  
   501  		origInboundFks := table.InboundFKs
   502  		table.InboundFKs = nil
   503  		for i := range origInboundFks {
   504  			ref := &origInboundFks[i]
   505  			if refRewrite, ok := tableRewrites[ref.OriginTableID]; ok {
   506  				ref.ReferencedTableID = tableRewrite.TableID
   507  				ref.OriginTableID = refRewrite.TableID
   508  				table.InboundFKs = append(table.InboundFKs, *ref)
   509  			}
   510  		}
   511  
   512  		for i, dest := range table.DependsOn {
   513  			if depRewrite, ok := tableRewrites[dest]; ok {
   514  				table.DependsOn[i] = depRewrite.TableID
   515  			} else {
   516  				// Views with missing dependencies should have been filtered out
   517  				// or have caused an error in maybeFilterMissingViews().
   518  				return errors.AssertionFailedf(
   519  					"cannot restore %q because referenced table %d was not found",
   520  					table.Name, dest)
   521  			}
   522  		}
   523  		origRefs := table.DependedOnBy
   524  		table.DependedOnBy = nil
   525  		for _, ref := range origRefs {
   526  			if refRewrite, ok := tableRewrites[ref.ID]; ok {
   527  				ref.ID = refRewrite.TableID
   528  				table.DependedOnBy = append(table.DependedOnBy, ref)
   529  			}
   530  		}
   531  
   532  		// Rewrite sequence references in column descriptors.
   533  		for idx := range table.Columns {
   534  			var newSeqRefs []sqlbase.ID
   535  			col := &table.Columns[idx]
   536  			for _, seqID := range col.UsesSequenceIds {
   537  				if rewrite, ok := tableRewrites[seqID]; ok {
   538  					newSeqRefs = append(newSeqRefs, rewrite.TableID)
   539  				} else {
   540  					// The referenced sequence isn't being restored.
   541  					// Strip the DEFAULT expression and sequence references.
   542  					// To get here, the user must have specified 'skip_missing_sequences' --
    543  					// otherwise, we would have errored out in allocateTableRewrites.
   544  					newSeqRefs = []sqlbase.ID{}
   545  					col.DefaultExpr = nil
   546  					break
   547  				}
   548  			}
   549  			col.UsesSequenceIds = newSeqRefs
   550  		}
   551  
    552  		// Since this is a "new" table in the eyes of the new cluster, any leftover
    553  		// schema change lease is bogus (and the nodeID refers to the backup cluster).
   554  		table.Lease = nil
   555  	}
   556  	return nil
   557  }
   558  
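         // errOnMissingRange returns the error reported when no backup in the chain
         // covers the given time range for the given key span.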
   559  func errOnMissingRange(span covering.Range, start, end hlc.Timestamp) error {
   560  	return errors.Errorf(
   561  		"no backup covers time [%s,%s) for range [%s,%s) (or backups out of order)",
   562  		start, end, roachpb.Key(span.Start), roachpb.Key(span.End),
   563  	)
   564  }
   565  
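         // restoreJobDescription returns the job description string for a RESTORE,
         // with each external storage URI passed through
         // cloud.SanitizeExternalStorageURI before being formatted back into the
         // statement text.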
   566  func restoreJobDescription(
   567  	p sql.PlanHookState, restore *tree.Restore, from [][]string, opts map[string]string,
   568  ) (string, error) {
   569  	r := &tree.Restore{
   570  		AsOf:    restore.AsOf,
   571  		Options: optsToKVOptions(opts),
   572  		Targets: restore.Targets,
   573  		From:    make([]tree.PartitionedBackup, len(restore.From)),
   574  	}
   575  
   576  	for i, backup := range from {
   577  		r.From[i] = make(tree.PartitionedBackup, len(backup))
   578  		for j, uri := range backup {
   579  			sf, err := cloud.SanitizeExternalStorageURI(uri, nil /* extraParams */)
   580  			if err != nil {
   581  				return "", err
   582  			}
   583  			r.From[i][j] = tree.NewDString(sf)
   584  		}
   585  	}
   586  
   587  	ann := p.ExtendedEvalContext().Annotations
   588  	return tree.AsStringWithFQNames(r, ann), nil
   589  }
   590  
   591  // RestoreHeader is the header for RESTORE stmt results.
   592  var RestoreHeader = sqlbase.ResultColumns{
   593  	{Name: "job_id", Typ: types.Int},
   594  	{Name: "status", Typ: types.String},
   595  	{Name: "fraction_completed", Typ: types.Float},
   596  	{Name: "rows", Typ: types.Int},
   597  	{Name: "index_entries", Typ: types.Int},
   598  	{Name: "bytes", Typ: types.Int},
   599  }
   600  
   601  // restorePlanHook implements sql.PlanHookFn.
   602  func restorePlanHook(
   603  	ctx context.Context, stmt tree.Statement, p sql.PlanHookState,
   604  ) (sql.PlanHookRowFn, sqlbase.ResultColumns, []sql.PlanNode, bool, error) {
   605  	restoreStmt, ok := stmt.(*tree.Restore)
   606  	if !ok {
   607  		return nil, nil, nil, false, nil
   608  	}
   609  
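         	// Type-check the FROM and WITH expressions now; the returned closures are
         	// evaluated later, inside fn, once the plan hook actually runs.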
   610  	fromFns := make([]func() ([]string, error), len(restoreStmt.From))
   611  	for i := range restoreStmt.From {
   612  		fromFn, err := p.TypeAsStringArray(ctx, tree.Exprs(restoreStmt.From[i]), "RESTORE")
   613  		if err != nil {
   614  			return nil, nil, nil, false, err
   615  		}
   616  		fromFns[i] = fromFn
   617  	}
   618  
   619  	optsFn, err := p.TypeAsStringOpts(ctx, restoreStmt.Options, restoreOptionExpectValues)
   620  	if err != nil {
   621  		return nil, nil, nil, false, err
   622  	}
   623  
   624  	fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error {
   625  		// TODO(dan): Move this span into sql.
   626  		ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag())
   627  		defer tracing.FinishSpan(span)
   628  
   629  		if err := utilccl.CheckEnterpriseEnabled(
   630  			p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "RESTORE",
   631  		); err != nil {
   632  			return err
   633  		}
   634  
   635  		if err := p.RequireAdminRole(ctx, "RESTORE"); err != nil {
   636  			return err
   637  		}
   638  
   639  		if !p.ExtendedEvalContext().TxnImplicit {
   640  			return errors.Errorf("RESTORE cannot be used inside a transaction")
   641  		}
   642  
   643  		from := make([][]string, len(fromFns))
   644  		for i := range fromFns {
   645  			from[i], err = fromFns[i]()
   646  			if err != nil {
   647  				return err
   648  			}
   649  		}
   650  		var endTime hlc.Timestamp
   651  		if restoreStmt.AsOf.Expr != nil {
   652  			var err error
   653  			endTime, err = p.EvalAsOfTimestamp(ctx, restoreStmt.AsOf)
   654  			if err != nil {
   655  				return err
   656  			}
   657  		}
   658  
   659  		opts, err := optsFn()
   660  		if err != nil {
   661  			return err
   662  		}
   663  		return doRestorePlan(ctx, restoreStmt, p, from, endTime, opts, resultsCh)
   664  	}
   665  	return fn, RestoreHeader, nil, false, nil
   666  }
   667  
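         // doRestorePlan resolves and validates the backup manifests, selects the
         // targets to restore, allocates ID rewrites for them, and then creates and
         // starts the restore job, blocking until the job's error channel reports a
         // result.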
   668  func doRestorePlan(
   669  	ctx context.Context,
   670  	restoreStmt *tree.Restore,
   671  	p sql.PlanHookState,
   672  	from [][]string,
   673  	endTime hlc.Timestamp,
   674  	opts map[string]string,
   675  	resultsCh chan<- tree.Datums,
   676  ) error {
   677  	if len(from) < 1 || len(from[0]) < 1 {
   678  		return errors.New("invalid base backup specified")
   679  	}
   680  	baseStores := make([]cloud.ExternalStorage, len(from[0]))
   681  	for i := range from[0] {
   682  		store, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, from[0][i])
   683  		if err != nil {
   684  			return errors.Wrapf(err, "failed to open backup storage location")
   685  		}
   686  		defer store.Close()
   687  		baseStores[i] = store
   688  	}
   689  
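         	// If an encryption passphrase was supplied, read the salt stored with the
         	// base backup and derive the file encryption key from the passphrase and
         	// that salt.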
   690  	var encryption *roachpb.FileEncryptionOptions
   691  	if passphrase, ok := opts[backupOptEncPassphrase]; ok {
   692  		opts, err := readEncryptionOptions(ctx, baseStores[0])
   693  		if err != nil {
   694  			return err
   695  		}
   696  		encryptionKey := storageccl.GenerateKey([]byte(passphrase), opts.Salt)
   697  		encryption = &roachpb.FileEncryptionOptions{Key: encryptionKey}
   698  	}
   699  
   700  	defaultURIs, mainBackupManifests, localityInfo, err := resolveBackupManifests(
   701  		ctx, baseStores, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI, from, endTime, encryption,
   702  	)
   703  	if err != nil {
   704  		return err
   705  	}
   706  
   707  	// Validate that the table coverage of the backup matches that of the restore.
    708  	// This prevents FULL CLUSTER backups from being restored as anything but full
    709  	// cluster restores, and vice-versa.
   710  	if restoreStmt.DescriptorCoverage == tree.AllDescriptors && mainBackupManifests[0].DescriptorCoverage == tree.RequestedDescriptors {
   711  		return errors.Errorf("full cluster RESTORE can only be used on full cluster BACKUP files")
   712  	}
   713  
   714  	// Ensure that no user table descriptors exist for a full cluster restore.
   715  	txn := p.ExecCfg().DB.NewTxn(ctx, "count-user-descs")
   716  	descCount, err := catalogkv.CountUserDescriptors(ctx, txn, p.ExecCfg().Codec)
   717  	if err != nil {
   718  		return errors.Wrap(err, "looking up user descriptors during restore")
   719  	}
   720  	if descCount != 0 && restoreStmt.DescriptorCoverage == tree.AllDescriptors {
   721  		return errors.Errorf(
   722  			"full cluster restore can only be run on a cluster with no tables or databases but found %d descriptors",
   723  			descCount,
   724  		)
   725  	}
   726  
   727  	_, skipMissingFKs := opts[restoreOptSkipMissingFKs]
   728  	if err := maybeUpgradeTableDescsInBackupManifests(ctx, mainBackupManifests, p.ExecCfg().Codec, skipMissingFKs); err != nil {
   729  		return err
   730  	}
   731  
   732  	sqlDescs, restoreDBs, err := selectTargets(ctx, p, mainBackupManifests, restoreStmt.Targets, restoreStmt.DescriptorCoverage, endTime)
   733  	if err != nil {
   734  		return err
   735  	}
   736  
   737  	databasesByID := make(map[sqlbase.ID]*sqlbase.DatabaseDescriptor)
   738  	tablesByID := make(map[sqlbase.ID]*sqlbase.TableDescriptor)
   739  	for _, desc := range sqlDescs {
   740  		if dbDesc := desc.GetDatabase(); dbDesc != nil {
   741  			databasesByID[dbDesc.ID] = dbDesc
   742  		} else if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
   743  			tablesByID[tableDesc.ID] = tableDesc
   744  		}
   745  	}
   746  	filteredTablesByID, err := maybeFilterMissingViews(tablesByID, opts)
   747  	if err != nil {
   748  		return err
   749  	}
   750  	tableRewrites, err := allocateTableRewrites(ctx, p, databasesByID, filteredTablesByID, restoreDBs, restoreStmt.DescriptorCoverage, opts)
   751  	if err != nil {
   752  		return err
   753  	}
   754  	description, err := restoreJobDescription(p, restoreStmt, from, opts)
   755  	if err != nil {
   756  		return err
   757  	}
   758  
   759  	var tables []*sqlbase.TableDescriptor
   760  	for _, desc := range filteredTablesByID {
   761  		tables = append(tables, desc)
   762  	}
   763  	if err := RewriteTableDescs(tables, tableRewrites, opts[restoreOptIntoDB]); err != nil {
   764  		return err
   765  	}
   766  
   767  	// Collect telemetry.
   768  	{
   769  		telemetry.Count("restore.total.started")
   770  		if restoreStmt.DescriptorCoverage == tree.AllDescriptors {
   771  			telemetry.Count("restore.full-cluster")
   772  		}
   773  	}
   774  
   775  	_, errCh, err := p.ExecCfg().JobRegistry.CreateAndStartJob(ctx, resultsCh, jobs.Record{
   776  		Description: description,
   777  		Username:    p.User(),
   778  		DescriptorIDs: func() (sqlDescIDs []sqlbase.ID) {
   779  			for _, tableRewrite := range tableRewrites {
   780  				sqlDescIDs = append(sqlDescIDs, tableRewrite.TableID)
   781  			}
   782  			return sqlDescIDs
   783  		}(),
   784  		Details: jobspb.RestoreDetails{
   785  			EndTime:            endTime,
   786  			TableRewrites:      tableRewrites,
   787  			URIs:               defaultURIs,
   788  			BackupLocalityInfo: localityInfo,
   789  			TableDescs:         tables,
   790  			OverrideDB:         opts[restoreOptIntoDB],
   791  			DescriptorCoverage: restoreStmt.DescriptorCoverage,
   792  			Encryption:         encryption,
   793  		},
   794  		Progress: jobspb.RestoreProgress{},
   795  	})
   796  	if err != nil {
   797  		return err
   798  	}
   799  	return <-errCh
   800  }
   801  
   802  func init() {
   803  	sql.AddPlanHook(restorePlanHook)
   804  }