github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ccl/backupccl/manifest_handling.go

     1  // Copyright 2016 The Cockroach Authors.
     2  //
     3  // Licensed as a CockroachDB Enterprise file under the Cockroach Community
     4  // License (the "License"); you may not use this file except in compliance with
     5  // the License. You may obtain a copy of the License at
     6  //
     7  //     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
     8  
     9  package backupccl
    10  
    11  import (
    12  	"bytes"
    13  	"compress/gzip"
    14  	"context"
    15  	"io/ioutil"
    16  	"net/http"
    17  	"net/url"
    18  	"path"
    19  	"sort"
    20  
    21  	"github.com/cockroachdb/cockroach/pkg/ccl/storageccl"
    22  	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
    23  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    24  	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
    25  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
    26  	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
    27  	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    28  	"github.com/cockroachdb/cockroach/pkg/storage/cloud"
    29  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    30  	"github.com/cockroachdb/cockroach/pkg/util/log"
    31  	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
    32  	"github.com/cockroachdb/errors"
    33  )
    34  
    35  const (
    36  	// BackupManifestName is the file name used for serialized
    37  	// BackupManifest protos.
    38  	BackupManifestName = "BACKUP"
    39  	// BackupNewManifestName is a future name for the serialized
    40  	// BackupManifest proto.
    41  	BackupNewManifestName = "BACKUP_MANIFEST"
    42  
    43  	// BackupPartitionDescriptorPrefix is the file name prefix for serialized
    44  	// BackupPartitionDescriptor protos.
    45  	BackupPartitionDescriptorPrefix = "BACKUP_PART"
    46  	// BackupManifestCheckpointName is the file name used to store the
    47  	// serialized BackupManifest proto while the backup is in progress.
    48  	BackupManifestCheckpointName = "BACKUP-CHECKPOINT"
    49  	// BackupFormatDescriptorTrackingVersion added tracking of complete DBs.
    50  	BackupFormatDescriptorTrackingVersion uint32 = 1
    51  	// ZipType is the content type of a gzip-compressed file.
    52  	ZipType = "application/x-gzip"
    53  )
    54  
    55  // BackupFileDescriptors is an alias on which to implement sort's interface.
    56  type BackupFileDescriptors []BackupManifest_File
    57  
    58  func (r BackupFileDescriptors) Len() int      { return len(r) }
    59  func (r BackupFileDescriptors) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
    60  func (r BackupFileDescriptors) Less(i, j int) bool {
    61  	if cmp := bytes.Compare(r[i].Span.Key, r[j].Span.Key); cmp != 0 {
    62  		return cmp < 0
    63  	}
    64  	return bytes.Compare(r[i].Span.EndKey, r[j].Span.EndKey) < 0
    65  }
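
        // A minimal sorting sketch (illustrative, not part of the original file;
        // `manifest` is a hypothetical BackupManifest value): applying sort.Sort
        // with the Less above orders files by span start key, then end key.
        //
        //	sort.Sort(BackupFileDescriptors(manifest.Files))
        //	// manifest.Files is now ordered by (Span.Key, Span.EndKey).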
    66  
    67  // ReadBackupManifestFromURI creates an export store from the given URI, then
    68  // reads and unmarshals a BackupManifest at the standard location in the
    69  // export storage.
    70  func ReadBackupManifestFromURI(
    71  	ctx context.Context,
    72  	uri string,
    73  	makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory,
    74  	encryption *roachpb.FileEncryptionOptions,
    75  ) (BackupManifest, error) {
    76  	exportStore, err := makeExternalStorageFromURI(ctx, uri)
    77  
    78  	if err != nil {
    79  		return BackupManifest{}, err
    80  	}
    81  	defer exportStore.Close()
    82  	return readBackupManifestFromStore(ctx, exportStore, encryption)
    83  }
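
        // Illustrative call sketch (not part of the original file; the URI and the
        // factory name are hypothetical and normally come from the backup/restore
        // planning code):
        //
        //	manifest, err := ReadBackupManifestFromURI(
        //		ctx, "nodelocal://1/backups/full", externalStorageFromURI, nil, /* encryption */
        //	)
        //	if err != nil {
        //		return err
        //	}
        //	_ = manifest.EndTime // e.g. inspect when the backup ended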
    84  
    85  func readBackupManifestFromStore(
    86  	ctx context.Context, exportStore cloud.ExternalStorage, encryption *roachpb.FileEncryptionOptions,
    87  ) (BackupManifest, error) {
    88  
    89  	backupManifest, err := readBackupManifest(ctx, exportStore, BackupManifestName, encryption)
    90  	if err != nil {
    91  		newManifest, newErr := readBackupManifest(ctx, exportStore, BackupNewManifestName, encryption)
    92  		if newErr != nil {
    93  			return BackupManifest{}, err
    94  		}
    95  		backupManifest = newManifest
    96  	}
    97  	backupManifest.Dir = exportStore.Conf()
    98  	// TODO(dan): Sanity check this BackupManifest: non-empty EndTime,
    99  	// non-empty Paths, and non-overlapping Spans and keyranges in Files.
   100  	return backupManifest, nil
   101  }
   102  
   103  func containsManifest(ctx context.Context, exportStore cloud.ExternalStorage) (bool, error) {
   104  	r, err := exportStore.ReadFile(ctx, BackupManifestName)
   105  	if err != nil {
   106  		//nolint:returnerrcheck
   107  		return false, nil /* TODO(dt): only silence non-exists errors */
   108  	}
   109  	r.Close()
   110  	return true, nil
   111  }
   112  
   113  // compressData gzip-compresses the given buffer and returns the
   114  // compressed bytes.
   115  func compressData(descBuf []byte) ([]byte, error) {
   116  	gzipBuf := bytes.NewBuffer([]byte{})
   117  	gz := gzip.NewWriter(gzipBuf)
   118  	if _, err := gz.Write(descBuf); err != nil {
   119  		return nil, err
   120  	}
   121  	if err := gz.Close(); err != nil {
   122  		return nil, err
   123  	}
   124  	return gzipBuf.Bytes(), nil
   125  }
   126  
   127  // DecompressData decompresses a gzip-compressed data buffer and
   128  // returns the decompressed bytes.
   129  func DecompressData(descBytes []byte) ([]byte, error) {
   130  	r, err := gzip.NewReader(bytes.NewBuffer(descBytes))
   131  	if err != nil {
   132  		return nil, err
   133  	}
   134  	defer r.Close()
   135  	return ioutil.ReadAll(r)
   136  }
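
        // Round-trip sketch with compressData above (illustrative only):
        //
        //	compressed, err := compressData([]byte("manifest bytes"))
        //	if err != nil {
        //		return nil, err
        //	}
        //	original, err := DecompressData(compressed)
        //	if err != nil {
        //		return nil, err
        //	}
        //	// original now equals []byte("manifest bytes").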
   137  
   138  // readBackupManifest reads and unmarshals a BackupManifest from filename in
   139  // the provided export store.
   140  func readBackupManifest(
   141  	ctx context.Context,
   142  	exportStore cloud.ExternalStorage,
   143  	filename string,
   144  	encryption *roachpb.FileEncryptionOptions,
   145  ) (BackupManifest, error) {
   146  	r, err := exportStore.ReadFile(ctx, filename)
   147  	if err != nil {
   148  		return BackupManifest{}, err
   149  	}
   150  	defer r.Close()
   151  	descBytes, err := ioutil.ReadAll(r)
   152  	if err != nil {
   153  		return BackupManifest{}, err
   154  	}
   155  	if encryption != nil {
   156  		descBytes, err = storageccl.DecryptFile(descBytes, encryption.Key)
   157  		if err != nil {
   158  			return BackupManifest{}, err
   159  		}
   160  	}
   161  	fileType := http.DetectContentType(descBytes)
   162  	if fileType == ZipType {
   163  		descBytes, err = DecompressData(descBytes)
   164  		if err != nil {
   165  			return BackupManifest{}, errors.Wrap(
   166  				err, "decompressing backup manifest")
   167  		}
   168  	}
   169  	var backupManifest BackupManifest
   170  	if err := protoutil.Unmarshal(descBytes, &backupManifest); err != nil {
   171  		if encryption == nil && storageccl.AppearsEncrypted(descBytes) {
   172  			return BackupManifest{}, errors.Wrapf(
   173  				err, "file appears encrypted -- try specifying %q", backupOptEncPassphrase)
   174  		}
   175  		return BackupManifest{}, err
   176  	}
   177  	for _, d := range backupManifest.Descriptors {
   178  		// Calls to GetTable are generally frowned upon.
   179  		// This specific call exists to provide backwards compatibility with
   180  		// backups created prior to version 19.1. Starting in v19.1 the
   181  		// ModificationTime is always written in backups for all versions
   182  		// of table descriptors. In earlier cockroach versions only later
   183  		// table descriptor versions contain a non-empty ModificationTime.
   184  		// Later versions of CockroachDB use the MVCC timestamp to fill in
   185  		// the ModificationTime for table descriptors. When performing a restore
   186  		// we no longer have access to that MVCC timestamp but we can set it
   187  		// to a value we know will be safe.
   188  		if t := d.GetTable(); t == nil {
   189  			continue
   190  		} else if t.Version == 1 && t.ModificationTime.IsEmpty() {
   191  			t.ModificationTime = hlc.Timestamp{WallTime: 1}
   192  		}
   193  	}
   194  	return backupManifest, err
   195  }
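
        // Note on the content-type sniffing above: gzip output starts with the magic
        // bytes 0x1f 0x8b, which http.DetectContentType reports as
        // "application/x-gzip" (ZipType), so both gzip-compressed and uncompressed
        // manifest bytes are handled by the same read path. Illustrative check:
        //
        //	buf, _ := compressData([]byte("x"))
        //	_ = http.DetectContentType(buf) == ZipType // true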
   196  
   197  func readBackupPartitionDescriptor(
   198  	ctx context.Context,
   199  	exportStore cloud.ExternalStorage,
   200  	filename string,
   201  	encryption *roachpb.FileEncryptionOptions,
   202  ) (BackupPartitionDescriptor, error) {
   203  	r, err := exportStore.ReadFile(ctx, filename)
   204  	if err != nil {
   205  		return BackupPartitionDescriptor{}, err
   206  	}
   207  	defer r.Close()
   208  	descBytes, err := ioutil.ReadAll(r)
   209  	if err != nil {
   210  		return BackupPartitionDescriptor{}, err
   211  	}
   212  	if encryption != nil {
   213  		descBytes, err = storageccl.DecryptFile(descBytes, encryption.Key)
   214  		if err != nil {
   215  			return BackupPartitionDescriptor{}, err
   216  		}
   217  	}
   218  	fileType := http.DetectContentType(descBytes)
   219  	if fileType == ZipType {
   220  		descBytes, err = DecompressData(descBytes)
   221  		if err != nil {
   222  			return BackupPartitionDescriptor{}, errors.Wrap(
   223  				err, "decompressing backup partition descriptor")
   224  		}
   225  	}
   226  	var backupPartitionDesc BackupPartitionDescriptor
   227  	if err := protoutil.Unmarshal(descBytes, &backupPartitionDesc); err != nil {
   228  		return BackupPartitionDescriptor{}, err
   229  	}
   230  	return backupPartitionDesc, err
   231  }
   232  
   233  func writeBackupManifest(
   234  	ctx context.Context,
   235  	settings *cluster.Settings,
   236  	exportStore cloud.ExternalStorage,
   237  	filename string,
   238  	encryption *roachpb.FileEncryptionOptions,
   239  	desc *BackupManifest,
   240  ) error {
   241  	sort.Sort(BackupFileDescriptors(desc.Files))
   242  
   243  	descBuf, err := protoutil.Marshal(desc)
   244  	if err != nil {
   245  		return err
   246  	}
   247  	descBuf, err = compressData(descBuf)
   248  	if err != nil {
   249  		return errors.Wrap(err, "compressing backup manifest")
   250  	}
   251  
   252  	if encryption != nil {
   253  		descBuf, err = storageccl.EncryptFile(descBuf, encryption.Key)
   254  		if err != nil {
   255  			return err
   256  		}
   257  	}
   258  
   259  	return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
   260  }
   261  
   262  // writeBackupPartitionDescriptor writes metadata (containing a locality KV and
   263  // partial file listing) for a partitioned BACKUP to one of the stores in the
   264  // backup.
   265  func writeBackupPartitionDescriptor(
   266  	ctx context.Context,
   267  	exportStore cloud.ExternalStorage,
   268  	filename string,
   269  	encryption *roachpb.FileEncryptionOptions,
   270  	desc *BackupPartitionDescriptor,
   271  ) error {
   272  	descBuf, err := protoutil.Marshal(desc)
   273  	if err != nil {
   274  		return err
   275  	}
   276  	descBuf, err = compressData(descBuf)
   277  	if err != nil {
   278  		return errors.Wrap(err, "compressing backup partition descriptor")
   279  	}
   280  	if encryption != nil {
   281  		descBuf, err = storageccl.EncryptFile(descBuf, encryption.Key)
   282  		if err != nil {
   283  			return err
   284  		}
   285  	}
   286  
   287  	return exportStore.WriteFile(ctx, filename, bytes.NewReader(descBuf))
   288  }
   289  
   290  func loadBackupManifests(
   291  	ctx context.Context,
   292  	uris []string,
   293  	makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory,
   294  	encryption *roachpb.FileEncryptionOptions,
   295  ) ([]BackupManifest, error) {
   296  	backupManifests := make([]BackupManifest, len(uris))
   297  
   298  	for i, uri := range uris {
   299  		desc, err := ReadBackupManifestFromURI(ctx, uri, makeExternalStorageFromURI, encryption)
   300  		if err != nil {
   301  			return nil, errors.Wrapf(err, "failed to read backup descriptor")
   302  		}
   303  		backupManifests[i] = desc
   304  	}
   305  	if len(backupManifests) == 0 {
   306  		return nil, errors.Newf("no backups found")
   307  	}
   308  	return backupManifests, nil
   309  }
   310  
   311  // getLocalityInfo takes a list of stores and their URIs, along with the main
   312  // backup manifest, and searches each store for the locality pieces listed in
   313  // the main manifest, returning the mapping.
   314  func getLocalityInfo(
   315  	ctx context.Context,
   316  	stores []cloud.ExternalStorage,
   317  	uris []string,
   318  	mainBackupManifest BackupManifest,
   319  	encryption *roachpb.FileEncryptionOptions,
   320  	prefix string,
   321  ) (jobspb.RestoreDetails_BackupLocalityInfo, error) {
   322  	var info jobspb.RestoreDetails_BackupLocalityInfo
   323  	// Now get the list of expected partial per-store backup manifest filenames
   324  	// and attempt to find them.
   325  	urisByOrigLocality := make(map[string]string)
   326  	for _, filename := range mainBackupManifest.PartitionDescriptorFilenames {
   327  		if prefix != "" {
   328  			filename = path.Join(prefix, filename)
   329  		}
   330  		found := false
   331  		for i, store := range stores {
   332  			if desc, err := readBackupPartitionDescriptor(ctx, store, filename, encryption); err == nil {
   333  				if desc.BackupID != mainBackupManifest.ID {
   334  					return info, errors.Errorf(
   335  						"expected backup part to have backup ID %s, found %s",
   336  						mainBackupManifest.ID, desc.BackupID,
   337  					)
   338  				}
   339  				origLocalityKV := desc.LocalityKV
   340  				kv := roachpb.Tier{}
   341  				if err := kv.FromString(origLocalityKV); err != nil {
   342  					return info, errors.Wrapf(err, "reading backup manifest from %s", uris[i])
   343  				}
   344  				if _, ok := urisByOrigLocality[origLocalityKV]; ok {
   345  					return info, errors.Errorf("duplicate locality %s found in backup", origLocalityKV)
   346  				}
   347  				urisByOrigLocality[origLocalityKV] = uris[i]
   348  				found = true
   349  				break
   350  			}
   351  		}
   352  		if !found {
   353  			return info, errors.Errorf("expected manifest %s not found in backup locations", filename)
   354  		}
   355  	}
   356  	info.URIsByOriginalLocalityKV = urisByOrigLocality
   357  	return info, nil
   358  }
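
        // The locality KVs resolved above are strings of the form "key=value", as
        // parsed by roachpb.Tier. Illustrative sketch (not part of the original
        // file):
        //
        //	var kv roachpb.Tier
        //	if err := kv.FromString("region=us-east1"); err == nil {
        //		// kv.Key == "region", kv.Value == "us-east1"
        //	}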
   359  
   360  // findPriorBackups finds "appended" incremental backups by searching for the
   361  // subdirectories matching the naming pattern (e.g. YYMMDD/HHmmss.ss). Using
   362  // file-system searching rather than keeping an explicit list allows layers to
   363  // be manually moved/removed/etc without needing to update/maintain said list.
   364  func findPriorBackups(ctx context.Context, store cloud.ExternalStorage) ([]string, error) {
   365  	prev, err := store.ListFiles(ctx, "[0-9]*/[0-9]*.[0-9][0-9]/"+BackupManifestName)
   366  	if err != nil {
   367  		return nil, errors.Wrap(err, "reading previous backup layers")
   368  	}
   369  	sort.Strings(prev)
   370  	return prev, nil
   371  }
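
        // Illustrative result, assuming the YYMMDD/HHmmss.ss layout described above
        // (paths are hypothetical):
        //
        //	prev, _ := findPriorBackups(ctx, store)
        //	// e.g. prev == []string{"200220/130000.00/BACKUP", "200221/130000.00/BACKUP"}
        //
        // Lexical sorting of these paths also orders the layers chronologically.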
   372  
   373  // resolveBackupManifests resolves a list of lists of URIs that point to the
   374  // incremental layers (each of which can be partitioned) of backups into the
   375  // actual backup manifests and metadata required to RESTORE. If only one layer
   376  // is explicitly provided, it is inspected to see if it contains "appended"
   377  // layers internally that are then expanded into the result layers returned,
   378  // similar to if those layers had been specified in `from` explicitly.
   379  func resolveBackupManifests(
   380  	ctx context.Context,
   381  	baseStores []cloud.ExternalStorage,
   382  	mkStore cloud.ExternalStorageFromURIFactory,
   383  	from [][]string,
   384  	endTime hlc.Timestamp,
   385  	encryption *roachpb.FileEncryptionOptions,
   386  ) (
   387  	defaultURIs []string,
   388  	mainBackupManifests []BackupManifest,
   389  	localityInfo []jobspb.RestoreDetails_BackupLocalityInfo,
   390  	_ error,
   391  ) {
   392  	baseManifest, err := readBackupManifestFromStore(ctx, baseStores[0], encryption)
   393  	if err != nil {
   394  		return nil, nil, nil, err
   395  	}
   396  
   397  	// If explicit incremental backups were passed, we simply load them one
   398  	// by one as specified and return the results.
   399  	if len(from) > 1 {
   400  		defaultURIs = make([]string, len(from))
   401  		localityInfo = make([]jobspb.RestoreDetails_BackupLocalityInfo, len(from))
   402  		mainBackupManifests = make([]BackupManifest, len(from))
   403  
   404  		for i, uris := range from {
   405  			// The first URI in the list must contain the main BACKUP manifest.
   406  			defaultURIs[i] = uris[0]
   407  
   408  			stores := make([]cloud.ExternalStorage, len(uris))
   409  			for j := range uris {
   410  				stores[j], err = mkStore(ctx, uris[j])
   411  				if err != nil {
   412  					return nil, nil, nil, errors.Wrapf(err, "export configuration")
   413  				}
   414  				defer stores[j].Close()
   415  			}
   416  
   417  			mainBackupManifests[i], err = readBackupManifestFromStore(ctx, stores[0], encryption)
   418  			if err != nil {
   419  				return nil, nil, nil, err
   420  			}
   421  			if len(uris) > 1 {
   422  				localityInfo[i], err = getLocalityInfo(
   423  					ctx, stores, uris, mainBackupManifests[i], encryption, "", /* prefix */
   424  				)
   425  				if err != nil {
   426  					return nil, nil, nil, err
   427  				}
   428  			}
   429  		}
   430  		if err != nil {
   431  			return nil, nil, nil, err
   432  		}
   433  	} else {
   434  		// Since incremental layers were *not* explicitly specified, search for any
   435  		// automatically created incremental layers inside the base layer.
   436  		prev, err := findPriorBackups(ctx, baseStores[0])
   437  		if err != nil {
   438  			if errors.Is(err, cloud.ErrListingUnsupported) {
   439  				log.Warningf(ctx, "storage sink %T does not support listing, only resolving the base backup", baseStores[0])
   440  				// If we do not support listing, we have to just assume there are none
   441  				// and restore the specified base.
   442  				prev = nil
   443  			} else {
   444  				return nil, nil, nil, err
   445  			}
   446  		}
   447  
   448  		numLayers := len(prev) + 1
   449  
   450  		defaultURIs = make([]string, numLayers)
   451  		mainBackupManifests = make([]BackupManifest, numLayers)
   452  		localityInfo = make([]jobspb.RestoreDetails_BackupLocalityInfo, numLayers)
   453  
   454  		// Set up the base layer explicitly.
   455  		defaultURIs[0] = from[0][0]
   456  		mainBackupManifests[0] = baseManifest
   457  		localityInfo[0], err = getLocalityInfo(
   458  			ctx, baseStores, from[0], baseManifest, encryption, "", /* prefix */
   459  		)
   460  		if err != nil {
   461  			return nil, nil, nil, err
   462  		}
   463  
   464  		// If we discovered additional layers, handle them too.
   465  		if numLayers > 1 {
   466  			numPartitions := len(from[0])
   467  			// We need the parsed baseURI for each partition to calculate the URI to
   468  			// each layer in that partition below.
   469  			baseURIs := make([]*url.URL, numPartitions)
   470  			for i := range from[0] {
   471  				baseURIs[i], err = url.Parse(from[0][i])
   472  				if err != nil {
   473  					return nil, nil, nil, err
   474  				}
   475  			}
   476  
   477  			// For each layer, we need to load the base manifest then calculate the URI and the
   478  			// locality info for each partition.
   479  			for i := range prev {
   480  				defaultManifestForLayer, err := readBackupManifest(ctx, baseStores[0], prev[i], encryption)
   481  				if err != nil {
   482  					return nil, nil, nil, err
   483  				}
   484  				mainBackupManifests[i+1] = defaultManifestForLayer
   485  
   486  				// prev[i] is the path to the manifest file itself for layer i -- the
   487  				// dirname piece of that path is the subdirectory in each of the
   488  				// partitions in which we'll also expect to find a partition manifest.
   489  				subDir := path.Dir(prev[i])
   490  				partitionURIs := make([]string, numPartitions)
   491  				for j := range baseURIs {
   492  					u := *baseURIs[j] // NB: makes a copy to avoid mutating the baseURI.
   493  					u.Path = path.Join(u.Path, subDir)
   494  					partitionURIs[j] = u.String()
   495  				}
   496  				defaultURIs[i+1] = partitionURIs[0]
   497  				localityInfo[i+1], err = getLocalityInfo(ctx, baseStores, partitionURIs, defaultManifestForLayer, encryption, subDir)
   498  				if err != nil {
   499  					return nil, nil, nil, err
   500  				}
   501  			}
   502  		}
   503  	}
   504  
   505  	// Check that the requested target time, if specified, is valid for the list
   506  	// of incremental backups resolved, truncating the results to the backup that
   507  	// contains the target time.
   508  	if !endTime.IsEmpty() {
   509  		ok := false
   510  		for i, b := range mainBackupManifests {
   511  			// Find the backup that covers the requested time.
   512  			if b.StartTime.Less(endTime) && endTime.LessEq(b.EndTime) {
   513  				ok = true
   514  
   515  				mainBackupManifests = mainBackupManifests[:i+1]
   516  				defaultURIs = defaultURIs[:i+1]
   517  				localityInfo = localityInfo[:i+1]
   518  
   519  				// Ensure that the backup actually has revision history.
   520  				if !endTime.Equal(b.EndTime) {
   521  					if b.MVCCFilter != MVCCFilter_All {
   522  						const errPrefix = "invalid RESTORE timestamp: restoring to arbitrary time requires that BACKUP for requested time be created with '%s' option."
   523  						if i == 0 {
   524  							return nil, nil, nil, errors.Errorf(
   525  								errPrefix+" nearest backup time is %s", backupOptRevisionHistory, b.EndTime,
   526  							)
   527  						}
   528  						return nil, nil, nil, errors.Errorf(
   529  							errPrefix+" nearest BACKUP times are %s or %s",
   530  							backupOptRevisionHistory, mainBackupManifests[i-1].EndTime, b.EndTime,
   531  						)
   532  					}
   533  					// Ensure that the revision history actually covers the requested time -
   534  					// while the BACKUP's start and end might contain the requested time for
   535  					// example if start time is 0 (full backup), the revision history was
   536  					// only captured since the GC window. Note that the RevisionStartTime is
   537  					// the latest for ranges backed up.
   538  					if endTime.LessEq(b.RevisionStartTime) {
   539  						return nil, nil, nil, errors.Errorf(
   540  							"invalid RESTORE timestamp: BACKUP for requested time only has revision history from %v", b.RevisionStartTime,
   541  						)
   542  					}
   543  				}
   544  				break
   545  			}
   546  		}
   547  
   548  		if !ok {
   549  			return nil, nil, nil, errors.Errorf(
   550  				"invalid RESTORE timestamp: supplied backups do not cover requested time",
   551  			)
   552  		}
   553  	}
   554  
   555  	return defaultURIs, mainBackupManifests, localityInfo, nil
   556  }
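
        // As a summary of the target-time check above: layer i is selected when
        //
        //	b.StartTime.Less(endTime) && endTime.LessEq(b.EndTime)
        //
        // i.e. the requested time falls in (StartTime, EndTime], and the resolved
        // URIs, manifests, and locality info are truncated to layers 0..i.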
   557  
   558  func loadSQLDescsFromBackupsAtTime(
   559  	backupManifests []BackupManifest, asOf hlc.Timestamp,
   560  ) ([]sqlbase.Descriptor, BackupManifest) {
   561  	lastBackupManifest := backupManifests[len(backupManifests)-1]
   562  
   563  	if asOf.IsEmpty() {
   564  		return lastBackupManifest.Descriptors, lastBackupManifest
   565  	}
   566  
   567  	for _, b := range backupManifests {
   568  		if asOf.Less(b.StartTime) {
   569  			break
   570  		}
   571  		lastBackupManifest = b
   572  	}
   573  	if len(lastBackupManifest.DescriptorChanges) == 0 {
   574  		return lastBackupManifest.Descriptors, lastBackupManifest
   575  	}
   576  
   577  	byID := make(map[sqlbase.ID]*sqlbase.Descriptor, len(lastBackupManifest.Descriptors))
   578  	for _, rev := range lastBackupManifest.DescriptorChanges {
   579  		if asOf.Less(rev.Time) {
   580  			break
   581  		}
   582  		if rev.Desc == nil {
   583  			delete(byID, rev.ID)
   584  		} else {
   585  			byID[rev.ID] = rev.Desc
   586  		}
   587  	}
   588  
   589  	allDescs := make([]sqlbase.Descriptor, 0, len(byID))
   590  	for _, desc := range byID {
   591  		if t := desc.Table(hlc.Timestamp{}); t != nil {
   592  			// A table revision may have been captured before it was in a DB that is
   593  			// backed up -- if the DB is missing, filter the table.
   594  			if byID[t.ParentID] == nil {
   595  				continue
   596  			}
   597  		}
   598  		allDescs = append(allDescs, *desc)
   599  	}
   600  	return allDescs, lastBackupManifest
   601  }
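
        // Illustrative replay of DescriptorChanges above (assumed data, not part of
        // the original file): given revisions for ID 52
        //
        //	{Time: t1, Desc: v1}, {Time: t2, Desc: v2}, {Time: t3, Desc: nil}
        //
        // an asOf in [t2, t3) yields v2 for ID 52, while an asOf at or after t3
        // drops it, since a nil Desc records a deletion.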
   602  
   603  // sanitizeLocalityKV returns a sanitized version of the input string where all
   604  // characters that are not alphanumeric or -, =, or _ are replaced with _.
   605  func sanitizeLocalityKV(kv string) string {
   606  	sanitizedKV := make([]byte, len(kv))
   607  	for i := 0; i < len(kv); i++ {
   608  		if (kv[i] >= 'a' && kv[i] <= 'z') ||
   609  			(kv[i] >= 'A' && kv[i] <= 'Z') ||
   610  			(kv[i] >= '0' && kv[i] <= '9') || kv[i] == '-' || kv[i] == '=' {
   611  			sanitizedKV[i] = kv[i]
   612  		} else {
   613  			sanitizedKV[i] = '_'
   614  		}
   615  	}
   616  	return string(sanitizedKV)
   617  }
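
        // Illustrative inputs and outputs (not part of the original file):
        //
        //	sanitizeLocalityKV("region=us-east1") // "region=us-east1" (all characters allowed)
        //	sanitizeLocalityKV("zone=us east/1a") // "zone=us_east_1a"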
   618  
   619  func readEncryptionOptions(
   620  	ctx context.Context, src cloud.ExternalStorage,
   621  ) (*EncryptionInfo, error) {
   622  	r, err := src.ReadFile(ctx, "encryption-info")
   623  	if err != nil {
   624  		return nil, errors.Wrap(err, "could not find or read encryption information")
   625  	}
   626  	defer r.Close()
   627  	encInfoBytes, err := ioutil.ReadAll(r)
   628  	if err != nil {
   629  		return nil, errors.Wrap(err, "could not find or read encryption information")
   630  	}
   631  	var encInfo EncryptionInfo
   632  	if err := protoutil.Unmarshal(encInfoBytes, &encInfo); err != nil {
   633  		return nil, err
   634  	}
   635  	return &encInfo, nil
   636  }
   637  
   638  func writeEncryptionOptions(
   639  	ctx context.Context, opts *EncryptionInfo, dest cloud.ExternalStorage,
   640  ) error {
   641  	buf, err := protoutil.Marshal(opts)
   642  	if err != nil {
   643  		return err
   644  	}
   645  	if err := dest.WriteFile(ctx, "encryption-info", bytes.NewReader(buf)); err != nil {
   646  		return err
   647  	}
   648  	return nil
   649  }
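
        // Round-trip sketch (illustrative; `opts` is a hypothetical *EncryptionInfo):
        // the options written here are read back by readEncryptionOptions above from
        // the fixed "encryption-info" object.
        //
        //	if err := writeEncryptionOptions(ctx, opts, store); err != nil {
        //		return err
        //	}
        //	readBack, err := readEncryptionOptions(ctx, store)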
   650  
   651  // VerifyUsableExportTarget ensures that the target location does not already
   652  // contain a BACKUP or checkpoint and writes an empty checkpoint, both verifying
   653  // that the location is writable and locking out accidental concurrent
   654  // operations on that location that subsequently try this check. Callers must
   655  // clean up the written checkpoint file (BackupManifestCheckpointName) only
   656  // after writing to the backup file location (BackupManifestName).
   657  func VerifyUsableExportTarget(
   658  	ctx context.Context,
   659  	settings *cluster.Settings,
   660  	exportStore cloud.ExternalStorage,
   661  	readable string,
   662  	encryption *roachpb.FileEncryptionOptions,
   663  ) error {
   664  	if r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {
   665  		// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
   666  		// returns (and then wrap/tag them), we could narrow this check.
   667  		r.Close()
   668  		return pgerror.Newf(pgcode.FileAlreadyExists,
   669  			"%s already contains a %s file",
   670  			readable, BackupManifestName)
   671  	}
   680  	if r, err := exportStore.ReadFile(ctx, BackupManifestCheckpointName); err == nil {
   681  		r.Close()
   682  		return pgerror.Newf(pgcode.FileAlreadyExists,
   683  			"%s already contains a %s file (is another operation already in progress?)",
   684  			readable, BackupManifestCheckpointName)
   685  	}
   686  	if err := writeBackupManifest(
   687  		ctx, settings, exportStore, BackupManifestCheckpointName, encryption, &BackupManifest{},
   688  	); err != nil {
   689  		return errors.Wrapf(err, "cannot write to %s", readable)
   690  	}
   691  	return nil
   692  }
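
        // Typical call sequence (illustrative; only the exported identifiers are from
        // this file): backup planning verifies and locks the target, the job then
        // overwrites the checkpoint as it progresses, and finally writes the real
        // manifest.
        //
        //	if err := VerifyUsableExportTarget(ctx, settings, exportStore, uri, encryption); err != nil {
        //		return err
        //	}
        //	// ... run the backup, re-writing BackupManifestCheckpointName with progress ...
        //	if err := writeBackupManifest(ctx, settings, exportStore, BackupManifestName, encryption, &manifest); err != nil {
        //		return err
        //	}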