github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ccl/backupccl/backup_planning.go

// Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package backupccl

import (
	"context"
	"net/url"
	"sort"

	"github.com/cockroachdb/cockroach/pkg/build"
	"github.com/cockroachdb/cockroach/pkg/ccl/storageccl"
	"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
	"github.com/cockroachdb/cockroach/pkg/clusterversion"
	"github.com/cockroachdb/cockroach/pkg/jobs"
	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
	"github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
	"github.com/cockroachdb/cockroach/pkg/settings"
	"github.com/cockroachdb/cockroach/pkg/sql"
	"github.com/cockroachdb/cockroach/pkg/sql/covering"
	"github.com/cockroachdb/cockroach/pkg/sql/privilege"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/sql/stats"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
	"github.com/cockroachdb/cockroach/pkg/storage/cloud"
	"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
	"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/interval"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
	"github.com/cockroachdb/cockroach/pkg/util/tracing"
	"github.com/cockroachdb/cockroach/pkg/util/uuid"
	"github.com/cockroachdb/errors"
)

const (
	backupOptRevisionHistory = "revision_history"
	backupOptEncPassphrase   = "encryption_passphrase"
	backupOptWithPrivileges  = "privileges"
	localityURLParam         = "COCKROACH_LOCALITY"
	defaultLocalityValue     = "default"
)
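
// Editor's note (illustrative, not part of the original file): these options
// and the locality URL parameter surface in BACKUP statements along the
// lines of
//
//	BACKUP DATABASE foo TO 'nodelocal://1/foo?COCKROACH_LOCALITY=default'
//	WITH revision_history, encryption_passphrase = 'hunter2';
//
// where the destination URI and passphrase are hypothetical.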

// TODO(pbardea): We should move to a model of having the system tables opt-
// {in,out} of being included in a full cluster backup. See #43781.
var fullClusterSystemTables = []string{
	// System config tables.
	sqlbase.UsersTable.Name,
	sqlbase.ZonesTable.Name,
	sqlbase.SettingsTable.Name,
	// Rest of system tables.
	sqlbase.LocationsTable.Name,
	sqlbase.RoleMembersTable.Name,
	sqlbase.UITable.Name,
	sqlbase.CommentsTable.Name,
	sqlbase.JobsTable.Name,
	// Table statistics are backed up in the backup descriptor for now.
}

var useTBI = settings.RegisterBoolSetting(
	"kv.bulk_io_write.experimental_incremental_export_enabled",
	"use experimental time-bound file filter when exporting in BACKUP",
	true,
)
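
// Editor's note (illustrative): as a boolean cluster setting, the knob above
// can be toggled from a SQL shell, e.g.
//
//	SET CLUSTER SETTING kv.bulk_io_write.experimental_incremental_export_enabled = false;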

var backupOptionExpectValues = map[string]sql.KVStringOptValidate{
	backupOptRevisionHistory: sql.KVStringOptRequireNoValue,
	backupOptEncPassphrase:   sql.KVStringOptRequireValue,
}

type tableAndIndex struct {
	tableID sqlbase.ID
	indexID sqlbase.IndexID
}

// spansForAllTableIndexes returns non-overlapping spans for every index and
// table passed in. They would normally overlap if any of them are interleaved.
func spansForAllTableIndexes(
	codec keys.SQLCodec, tables []*sqlbase.TableDescriptor, revs []BackupManifest_DescriptorRevision,
) []roachpb.Span {

	added := make(map[tableAndIndex]bool, len(tables))
	sstIntervalTree := interval.NewTree(interval.ExclusiveOverlapper)
	for _, table := range tables {
		for _, index := range table.AllNonDropIndexes() {
			if err := sstIntervalTree.Insert(intervalSpan(table.IndexSpan(codec, index.ID)), false); err != nil {
				panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
			}
			added[tableAndIndex{tableID: table.ID, indexID: index.ID}] = true
		}
	}
	// If there are desc revisions, ensure that we also add any index spans
	// in them that we didn't already get above, e.g. indexes or tables that
	// are not in the latest revision because they were dropped during the
	// time window in question.
	for _, rev := range revs {
		if tbl := rev.Desc.Table(hlc.Timestamp{}); tbl != nil {
			for _, idx := range tbl.AllNonDropIndexes() {
				key := tableAndIndex{tableID: tbl.ID, indexID: idx.ID}
				if !added[key] {
					if err := sstIntervalTree.Insert(intervalSpan(tbl.IndexSpan(codec, idx.ID)), false); err != nil {
						panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan"))
					}
					added[key] = true
				}
			}
		}
	}

	var spans []roachpb.Span
	_ = sstIntervalTree.Do(func(r interval.Interface) bool {
		spans = append(spans, roachpb.Span{
			Key:    roachpb.Key(r.Range().Start),
			EndKey: roachpb.Key(r.Range().End),
		})
		return false
	})
	return spans
}
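
// exampleCollectSpans is an editor's sketch (not part of the original file)
// of the pattern spansForAllTableIndexes relies on: spans are inserted into
// an interval tree and then read back with Do, which visits them in sorted
// key order. It reuses the package's intervalSpan adapter and assumes the
// inputs are well-formed (Key < EndKey).
func exampleCollectSpans(in []roachpb.Span) []roachpb.Span {
	t := interval.NewTree(interval.ExclusiveOverlapper)
	for _, sp := range in {
		// Insert only fails for malformed ranges; the sketch skips those
		// rather than panicking.
		_ = t.Insert(intervalSpan(sp), false)
	}
	var out []roachpb.Span
	_ = t.Do(func(r interval.Interface) bool {
		out = append(out, roachpb.Span{
			Key:    roachpb.Key(r.Range().Start),
			EndKey: roachpb.Key(r.Range().End),
		})
		return false // false means: keep iterating.
	})
	return out
}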

func optsToKVOptions(opts map[string]string) tree.KVOptions {
	if len(opts) == 0 {
		return nil
	}
	sortedOpts := make([]string, 0, len(opts))
	for k := range opts {
		sortedOpts = append(sortedOpts, k)
	}
	sort.Strings(sortedOpts)
	kvopts := make(tree.KVOptions, 0, len(opts))
	for _, k := range sortedOpts {
		opt := tree.KVOption{Key: tree.Name(k)}
		if v := opts[k]; v != "" {
			if k == backupOptEncPassphrase {
				v = "redacted"
			}
			opt.Value = tree.NewDString(v)
		}
		kvopts = append(kvopts, opt)
	}
	return kvopts
}
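
// Editor's note (illustrative): given opts of {"revision_history": "",
// "encryption_passphrase": "hunter2"}, the result renders in sorted key
// order as `encryption_passphrase = 'redacted', revision_history`, so the
// passphrase itself never reaches the job description.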

// getURIsByLocalityKV takes a slice of URIs for a single (possibly partitioned)
// backup, and returns the default backup destination URI and a map of all other
// URIs by locality KV, appending appendPath to the path component of both the
// default URI and all the locality URIs. The URIs in the result do not include
// the COCKROACH_LOCALITY parameter.
func getURIsByLocalityKV(to []string, appendPath string) (string, map[string]string, error) {
	localityAndBaseURI := func(uri string) (string, string, error) {
		parsedURI, err := url.Parse(uri)
		if err != nil {
			return "", "", err
		}
		q := parsedURI.Query()
		localityKV := q.Get(localityURLParam)
		// Remove the backup locality parameter.
		q.Del(localityURLParam)
		parsedURI.RawQuery = q.Encode()
		if appendPath != "" {
			parsedURI.Path = parsedURI.Path + appendPath
		}
		baseURI := parsedURI.String()
		return localityKV, baseURI, nil
	}

	urisByLocalityKV := make(map[string]string)
	if len(to) == 1 {
		localityKV, baseURI, err := localityAndBaseURI(to[0])
		if err != nil {
			return "", nil, err
		}
		if localityKV != "" && localityKV != defaultLocalityValue {
			return "", nil, errors.Errorf("%s %s is invalid for a single BACKUP location",
				localityURLParam, localityKV)
		}
		return baseURI, urisByLocalityKV, nil
	}

	var defaultURI string
	for _, uri := range to {
		localityKV, baseURI, err := localityAndBaseURI(uri)
		if err != nil {
			return "", nil, err
		}
		if localityKV == "" {
			return "", nil, errors.Errorf(
				"multiple URLs are provided for partitioned BACKUP, but %s is not specified",
				localityURLParam,
			)
		}
		if localityKV == defaultLocalityValue {
			if defaultURI != "" {
				return "", nil, errors.Errorf("multiple default URLs provided for partitioned backup")
			}
			defaultURI = baseURI
		} else {
			kv := roachpb.Tier{}
			if err := kv.FromString(localityKV); err != nil {
				return "", nil, errors.Wrap(err, "failed to parse backup locality")
			}
			if _, ok := urisByLocalityKV[localityKV]; ok {
				return "", nil, errors.Errorf("duplicate URIs for locality %s", localityKV)
			}
			urisByLocalityKV[localityKV] = baseURI
		}
	}
	if defaultURI == "" {
		return "", nil, errors.Errorf("no default URL provided for partitioned backup")
	}
	return defaultURI, urisByLocalityKV, nil
}
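
// Editor's note (illustrative): for a partitioned backup such as
//
//	to := []string{
//		"nodelocal://1/foo?COCKROACH_LOCALITY=default",
//		"nodelocal://2/foo?COCKROACH_LOCALITY=region%3Dus-west",
//	}
//
// getURIsByLocalityKV(to, "") returns "nodelocal://1/foo" as the default URI
// and {"region=us-west": "nodelocal://2/foo"} as the locality map, with the
// COCKROACH_LOCALITY parameter stripped from both. The node numbers and
// region here are hypothetical.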

func backupJobDescription(
	p sql.PlanHookState,
	backup *tree.Backup,
	to []string,
	incrementalFrom []string,
	opts map[string]string,
) (string, error) {
	b := &tree.Backup{
		AsOf:    backup.AsOf,
		Options: optsToKVOptions(opts),
		Targets: backup.Targets,
	}

	for _, t := range to {
		sanitizedTo, err := cloud.SanitizeExternalStorageURI(t, nil /* extraParams */)
		if err != nil {
			return "", err
		}
		b.To = append(b.To, tree.NewDString(sanitizedTo))
	}

	for _, from := range incrementalFrom {
		sanitizedFrom, err := cloud.SanitizeExternalStorageURI(from, nil /* extraParams */)
		if err != nil {
			return "", err
		}
		b.IncrementalFrom = append(b.IncrementalFrom, tree.NewDString(sanitizedFrom))
	}

	ann := p.ExtendedEvalContext().Annotations
	return tree.AsStringWithFQNames(b, ann), nil
}
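
// Editor's note (illustrative): SanitizeExternalStorageURI is what keeps
// credentials out of the job description; a destination such as
// 's3://bucket/foo?AWS_SECRET_ACCESS_KEY=...' would be recorded with the
// secret parameter scrubbed. The URI shown here is hypothetical.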

// backupPlanHook implements PlanHookFn.
func backupPlanHook(
	ctx context.Context, stmt tree.Statement, p sql.PlanHookState,
) (sql.PlanHookRowFn, sqlbase.ResultColumns, []sql.PlanNode, bool, error) {
	backupStmt, ok := stmt.(*tree.Backup)
	if !ok {
		return nil, nil, nil, false, nil
	}

	toFn, err := p.TypeAsStringArray(ctx, tree.Exprs(backupStmt.To), "BACKUP")
	if err != nil {
		return nil, nil, nil, false, err
	}
	incrementalFromFn, err := p.TypeAsStringArray(ctx, backupStmt.IncrementalFrom, "BACKUP")
	if err != nil {
		return nil, nil, nil, false, err
	}
	optsFn, err := p.TypeAsStringOpts(ctx, backupStmt.Options, backupOptionExpectValues)
	if err != nil {
		return nil, nil, nil, false, err
	}

	header := sqlbase.ResultColumns{
		{Name: "job_id", Typ: types.Int},
		{Name: "status", Typ: types.String},
		{Name: "fraction_completed", Typ: types.Float},
		{Name: "rows", Typ: types.Int},
		{Name: "index_entries", Typ: types.Int},
		{Name: "bytes", Typ: types.Int},
	}

	fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error {
		// TODO(dan): Move this span into sql.
		ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag())
		defer tracing.FinishSpan(span)

		if err := utilccl.CheckEnterpriseEnabled(
			p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "BACKUP",
		); err != nil {
			return err
		}

		if err := p.RequireAdminRole(ctx, "BACKUP"); err != nil {
			return err
		}

		if !p.ExtendedEvalContext().TxnImplicit {
			return errors.Errorf("BACKUP cannot be used inside a transaction")
		}

		to, err := toFn()
		if err != nil {
			return err
		}
		if len(to) > 1 &&
			!p.ExecCfg().Settings.Version.IsActive(ctx, clusterversion.VersionPartitionedBackup) {
			return errors.Errorf("partitioned backups can only be made on a cluster that has been fully upgraded to version 19.2")
		}

		incrementalFrom, err := incrementalFromFn()
		if err != nil {
			return err
		}

		endTime := p.ExecCfg().Clock.Now()
		if backupStmt.AsOf.Expr != nil {
			var err error
			if endTime, err = p.EvalAsOfTimestamp(ctx, backupStmt.AsOf); err != nil {
				return err
			}
		}

		opts, err := optsFn()
		if err != nil {
			return err
		}

		mvccFilter := MVCCFilter_Latest
		if _, ok := opts[backupOptRevisionHistory]; ok {
			mvccFilter = MVCCFilter_All
		}

		targetDescs, completeDBs, err := ResolveTargetsToDescriptors(ctx, p, endTime, backupStmt.Targets, backupStmt.DescriptorCoverage)
		if err != nil {
			return err
		}

		statsCache := p.ExecCfg().TableStatsCache
		tableStatistics := make([]*stats.TableStatisticProto, 0)
		var tables []*sqlbase.TableDescriptor
		for _, desc := range targetDescs {
			if dbDesc := desc.GetDatabase(); dbDesc != nil {
				if err := p.CheckPrivilege(ctx, dbDesc, privilege.SELECT); err != nil {
					return err
				}
			}
			if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
				if err := p.CheckPrivilege(ctx, tableDesc, privilege.SELECT); err != nil {
					return err
				}
				tables = append(tables, tableDesc)

				// If the table has any user defined types, error out.
				for _, col := range tableDesc.Columns {
					if col.Type.UserDefined() {
						return unimplemented.NewWithIssue(48689, "user defined types in backup")
					}
				}

				// Collect all the table stats for this table.
				tableStatisticsAcc, err := statsCache.GetTableStats(ctx, tableDesc.GetID())
				if err != nil {
					return err
				}
				for i := range tableStatisticsAcc {
					tableStatistics = append(tableStatistics, &tableStatisticsAcc[i].TableStatisticProto)
				}
			}
		}

		if err := ensureInterleavesIncluded(tables); err != nil {
			return err
		}

		makeCloudStorage := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI

		var encryptionPassphrase []byte
		if passphrase, ok := opts[backupOptEncPassphrase]; ok {
			encryptionPassphrase = []byte(passphrase)
		}

		defaultURI, urisByLocalityKV, err := getURIsByLocalityKV(to, "")
		if err != nil {
			return err
		}
		defaultStore, err := makeCloudStorage(ctx, defaultURI)
		if err != nil {
			return err
		}
		// We can mutate `defaultStore` below, so we defer a func which closes
		// over the var, instead of deferring the Close() method directly on
		// this specific instance.
		defer func() {
			defaultStore.Close()
		}()

		var encryption *roachpb.FileEncryptionOptions
		var prevBackups []BackupManifest
		g := ctxgroup.WithContext(ctx)
		if len(incrementalFrom) > 0 {
			if encryptionPassphrase != nil {
				exportStore, err := makeCloudStorage(ctx, incrementalFrom[0])
				if err != nil {
					return err
				}
				defer exportStore.Close()
				opts, err := readEncryptionOptions(ctx, exportStore)
				if err != nil {
					return err
				}
				encryption = &roachpb.FileEncryptionOptions{
					Key: storageccl.GenerateKey(encryptionPassphrase, opts.Salt),
				}
			}
			prevBackups = make([]BackupManifest, len(incrementalFrom))
			for i := range incrementalFrom {
				i := i
				g.GoCtx(func(ctx context.Context) error {
					// TODO(lucy): We may want to upgrade the table descs to the newer
					// foreign key representation here, in case there are backups from an
					// older cluster. Keeping the descriptors as they are works for now
					// since all we need to do is get the past backups' table/index spans,
					// but it will be safer for future code to avoid having older-style
					// descriptors around.
					uri := incrementalFrom[i]
					desc, err := ReadBackupManifestFromURI(
						ctx, uri, makeCloudStorage, encryption,
					)
					if err != nil {
						return errors.Wrapf(err, "failed to read backup from %q", uri)
					}
					prevBackups[i] = desc
					return nil
				})
			}
			if err := g.Wait(); err != nil {
				return err
			}
		} else {
			exists, err := containsManifest(ctx, defaultStore)
			if err != nil {
				return err
			}
			if exists {
				if encryptionPassphrase != nil {
					encOpts, err := readEncryptionOptions(ctx, defaultStore)
					if err != nil {
						return err
					}
					encryption = &roachpb.FileEncryptionOptions{
						Key: storageccl.GenerateKey(encryptionPassphrase, encOpts.Salt),
					}
				}

				prev, err := findPriorBackups(ctx, defaultStore)
				if err != nil {
					return errors.Wrapf(err, "determining base for incremental backup")
				}
				prevBackups = make([]BackupManifest, len(prev)+1)

				m, err := readBackupManifestFromStore(ctx, defaultStore, encryption)
				if err != nil {
					return errors.Wrap(err, "loading base backup manifest")
				}
				prevBackups[0] = m

				if m.DescriptorCoverage == tree.AllDescriptors &&
					backupStmt.DescriptorCoverage != tree.AllDescriptors {
					return errors.Errorf("cannot append a backup of specific tables or databases to a full-cluster backup")
				}

				for i := range prev {
					i := i
					g.GoCtx(func(ctx context.Context) error {
						inc := prev[i]
						m, err := readBackupManifest(ctx, defaultStore, inc, encryption)
						if err != nil {
							return errors.Wrapf(err, "loading prior backup part manifest %q", inc)
						}
						prevBackups[i+1] = m
						return nil
					})
				}
				if err := g.Wait(); err != nil {
					return err
				}

				// Pick a piece-specific suffix and update the destination path(s).
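				// (The time layout below is Go's reference time; it renders as
				// "/YYYYMMDD/HHMMSS.ss", where ".ss" is hundredths of a second.)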
				partName := endTime.GoTime().Format("/20060102/150405.00")
				defaultURI, urisByLocalityKV, err = getURIsByLocalityKV(to, partName)
				if err != nil {
					return errors.Wrap(err, "adjusting backup destination to append new layer to existing backup")
				}
				// Close the old store before overwriting the reference with the new
				// subdir store.
				defaultStore.Close()
				defaultStore, err = makeCloudStorage(ctx, defaultURI)
				if err != nil {
					return errors.Wrap(err, "re-opening layer-specific destination location")
				}
				// Note that a Close() is already deferred above.
			}
		}

		clusterID := p.ExecCfg().ClusterID()
		for i := range prevBackups {
			// IDs are how we identify tables, and those are only meaningful in the
			// context of their own cluster, so we need to ensure we only allow
			// incremental previous backups that we created.
			if fromCluster := prevBackups[i].ClusterID; !fromCluster.Equal(clusterID) {
				return errors.Newf("previous BACKUP belongs to cluster %s", fromCluster.String())
			}
		}

		var startTime hlc.Timestamp
		var newSpans roachpb.Spans
		if len(prevBackups) > 0 {
			startTime = prevBackups[len(prevBackups)-1].EndTime
		}

		var priorIDs map[sqlbase.ID]sqlbase.ID

		var revs []BackupManifest_DescriptorRevision
		if mvccFilter == MVCCFilter_All {
			priorIDs = make(map[sqlbase.ID]sqlbase.ID)
			revs, err = getRelevantDescChanges(ctx, p.ExecCfg().DB, startTime, endTime, targetDescs, completeDBs, priorIDs)
			if err != nil {
				return err
			}
		}

		spans := spansForAllTableIndexes(p.ExecCfg().Codec, tables, revs)

		if len(prevBackups) > 0 {
			tablesInPrev := make(map[sqlbase.ID]struct{})
			dbsInPrev := make(map[sqlbase.ID]struct{})
			for _, d := range prevBackups[len(prevBackups)-1].Descriptors {
				if t := d.Table(hlc.Timestamp{}); t != nil {
					tablesInPrev[t.ID] = struct{}{}
				}
			}
			for _, d := range prevBackups[len(prevBackups)-1].CompleteDbs {
				dbsInPrev[d] = struct{}{}
			}

			if backupStmt.DescriptorCoverage != tree.AllDescriptors {
				if err := checkForNewTables(ctx, p.ExecCfg().DB, targetDescs, tablesInPrev, dbsInPrev, priorIDs, startTime, endTime); err != nil {
					return err
				}
			}

			var err error
			_, coveredTime, err := makeImportSpans(
				spans,
				prevBackups,
				nil, /*backupLocalityInfo*/
				keys.MinKey,
				func(span covering.Range, start, end hlc.Timestamp) error {
					if (start == hlc.Timestamp{}) {
						newSpans = append(newSpans, roachpb.Span{Key: span.Start, EndKey: span.End})
						return nil
					}
					return errOnMissingRange(span, start, end)
				},
			)
			if err != nil {
				return errors.Wrapf(err, "invalid previous backups (a new full backup may be required if a table has been created, dropped or truncated)")
			}
			if coveredTime != startTime {
				return errors.Errorf("expected previous backups to cover until time %v, got %v", startTime, coveredTime)
			}
		}

		nodeID, err := p.ExecCfg().NodeID.OptionalNodeIDErr(47970)
		if err != nil {
			return err
		}

		// If CompleteDbs is lost by a 1.x node, FormatDescriptorTrackingVersion
		// means that a 2.0 node will disallow `RESTORE DATABASE foo`, but `RESTORE
		// foo.table1, foo.table2...` will still work. MVCCFilter would be
		// mis-handled, but is disallowed above. IntroducedSpans may also be lost
		// by a 1.x node, meaning that if 1.1 nodes resume a backup, the limitation
		// of requiring full backups after schema changes remains.

		backupManifest := BackupManifest{
			StartTime:          startTime,
			EndTime:            endTime,
			MVCCFilter:         mvccFilter,
			Descriptors:        targetDescs,
			DescriptorChanges:  revs,
			CompleteDbs:        completeDBs,
			Spans:              spans,
			IntroducedSpans:    newSpans,
			FormatVersion:      BackupFormatDescriptorTrackingVersion,
			BuildInfo:          build.GetInfo(),
			NodeID:             nodeID,
			ClusterID:          p.ExecCfg().ClusterID(),
			Statistics:         tableStatistics,
			DescriptorCoverage: backupStmt.DescriptorCoverage,
		}

		// Sanity check: re-run the validation that RESTORE will do, but this time
		// including this backup, to ensure that this backup plus any previous
		// backups covers the expected interval.
		if _, coveredEnd, err := makeImportSpans(
			spans,
			append(prevBackups, backupManifest),
			nil, /*backupLocalityInfo*/
			keys.MinKey,
			errOnMissingRange,
		); err != nil {
			return err
		} else if coveredEnd != endTime {
			return errors.Errorf("expected backup (along with any previous backups) to cover to %v, not %v", endTime, coveredEnd)
		}

		descBytes, err := protoutil.Marshal(&backupManifest)
		if err != nil {
			return err
		}

		description, err := backupJobDescription(p, backupStmt, to, incrementalFrom, opts)
		if err != nil {
			return err
		}

		// If we didn't load any prior backups from which to get encryption
		// info, we need to pick a new salt and record it.
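		// (Editor's note: when prior encrypted backups exist, the salt is read
		// back via readEncryptionOptions above, so the same passphrase always
		// derives the same key across the backup chain.)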
		if encryptionPassphrase != nil && encryption == nil {
			salt, err := storageccl.GenerateSalt()
			if err != nil {
				return err
			}
			exportStore, err := makeCloudStorage(ctx, defaultURI)
			if err != nil {
				return err
			}
			defer exportStore.Close()
			if err := writeEncryptionOptions(ctx, &EncryptionInfo{Salt: salt}, exportStore); err != nil {
				return err
			}
			encryption = &roachpb.FileEncryptionOptions{Key: storageccl.GenerateKey(encryptionPassphrase, salt)}
		}

		// TODO (lucy): For partitioned backups, also add verification for other
		// stores we are writing to in addition to the default.
		if err := VerifyUsableExportTarget(
			ctx, p.ExecCfg().Settings, defaultStore, defaultURI, encryption,
		); err != nil {
			return err
		}

		backupDetails := jobspb.BackupDetails{
			StartTime:        startTime,
			EndTime:          endTime,
			URI:              defaultURI,
			URIsByLocalityKV: urisByLocalityKV,
			BackupManifest:   descBytes,
			Encryption:       encryption,
		}
		if len(spans) > 0 {
			protectedtsID := uuid.MakeV4()
			backupDetails.ProtectedTimestampRecord = &protectedtsID
		}

		jr := jobs.Record{
			Description: description,
			Username:    p.User(),
			DescriptorIDs: func() (sqlDescIDs []sqlbase.ID) {
				for _, sqlDesc := range backupManifest.Descriptors {
					sqlDescIDs = append(sqlDescIDs, sqlDesc.GetID())
				}
				return sqlDescIDs
			}(),
			Details:  backupDetails,
			Progress: jobspb.BackupProgress{},
		}
		var sj *jobs.StartableJob
		if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) {
			sj, err = p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, jr, txn, resultsCh)
			if err != nil {
				return err
			}
			if len(spans) > 0 {
				tsToProtect := endTime
				rec := jobsprotectedts.MakeRecord(*backupDetails.ProtectedTimestampRecord, *sj.ID(), tsToProtect, spans)
				return p.ExecCfg().ProtectedTimestampProvider.Protect(ctx, txn, rec)
			}
			return nil
		}); err != nil {
			if sj != nil {
				if cleanupErr := sj.CleanupOnRollback(ctx); cleanupErr != nil {
					log.Warningf(ctx, "failed to cleanup StartableJob: %v", cleanupErr)
				}
			}
			return err
		}

		// Collect telemetry.
		{
			telemetry.Count("backup.total.started")
			if startTime.IsEmpty() {
				telemetry.Count("backup.span.full")
			} else {
				telemetry.Count("backup.span.incremental")
				telemetry.CountBucketed("backup.incremental-span-sec", int64(timeutil.Since(startTime.GoTime()).Seconds()))
				if len(incrementalFrom) == 0 {
					telemetry.Count("backup.auto-incremental")
				}
			}
			if len(backupStmt.To) > 1 {
				telemetry.Count("backup.partitioned")
			}
			if mvccFilter == MVCCFilter_All {
				telemetry.Count("backup.revision-history")
			}
			if encryption != nil {
				telemetry.Count("backup.encrypted")
			}
			if backupStmt.DescriptorCoverage == tree.AllDescriptors {
				telemetry.Count("backup.targets.full_cluster")
			}
		}

		errCh, err := sj.Start(ctx)
		if err != nil {
			return err
		}
		return <-errCh
	}
	return fn, header, nil, false, nil
}

// checkForNewTables returns an error if any new tables were introduced, with
// the following exceptions:
// 1. A previous backup contained the entire DB.
// 2. The table was truncated after a previous backup was taken, so its ID has
// changed.
func checkForNewTables(
	ctx context.Context,
	db *kv.DB,
	targetDescs []sqlbase.Descriptor,
	tablesInPrev map[sqlbase.ID]struct{},
	dbsInPrev map[sqlbase.ID]struct{},
	priorIDs map[sqlbase.ID]sqlbase.ID,
	startTime hlc.Timestamp,
	endTime hlc.Timestamp,
) error {
	for _, d := range targetDescs {
		if t := d.Table(hlc.Timestamp{}); t != nil {
			// If we're trying to use a previous backup for this table, ideally it
			// actually contains this table.
			if _, ok := tablesInPrev[t.ID]; ok {
				continue
			}
			// This table isn't in the previous backup... maybe it was added to
			// a DB that the previous backup captured?
			if _, ok := dbsInPrev[t.ParentID]; ok {
				continue
			}
			// Maybe this table is missing from the previous backup because it was
			// truncated?
			if t.ReplacementOf.ID != sqlbase.InvalidID {
				// Check if we need to lazy-load the priorIDs (i.e. if this is the
				// first truncate we've encountered in a non-MVCC backup).
				if priorIDs == nil {
					priorIDs = make(map[sqlbase.ID]sqlbase.ID)
					_, err := getAllDescChanges(ctx, db, startTime, endTime, priorIDs)
					if err != nil {
						return err
					}
				}
				found := false
				for was := t.ReplacementOf.ID; was != sqlbase.InvalidID && !found; was = priorIDs[was] {
					_, found = tablesInPrev[was]
				}
				if found {
					continue
				}
			}
			return errors.Errorf("previous backup does not contain table %q", t.Name)
		}
	}
	return nil
}
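
// Editor's note (illustrative): TRUNCATE installs a replacement descriptor
// under a new ID, so the chain walk above is what recognizes a truncated
// table. If, hypothetically, ID 55 replaced 54, which replaced 53, and 53 is
// present in tablesInPrev, following ReplacementOf.ID through priorIDs
// (55 -> 54 -> 53) finds the match and the table is accepted.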

func init() {
	sql.AddPlanHook(backupPlanHook)
}