github.com/greenplum-db/gpbackup@v0.0.0-20240517212602-89daab1885b3/end_to_end/end_to_end_suite_test.go (about)

     1  package end_to_end_test
     2  
     3  import (
     4  	"encoding/csv"
     5  	"flag"
     6  	"fmt"
     7  	"io/fs"
     8  	"io/ioutil"
     9  	"os"
    10  	"os/exec"
    11  	path "path/filepath"
    12  	"reflect"
    13  	"regexp"
    14  	"sort"
    15  	"strconv"
    16  	"strings"
    17  	"sync"
    18  	"testing"
    19  	"time"
    20  
    21  	"github.com/blang/semver"
    22  	"github.com/greenplum-db/gp-common-go-libs/cluster"
    23  	"github.com/greenplum-db/gp-common-go-libs/dbconn"
    24  	"github.com/greenplum-db/gp-common-go-libs/iohelper"
    25  	"github.com/greenplum-db/gp-common-go-libs/operating"
    26  	"github.com/greenplum-db/gp-common-go-libs/structmatcher"
    27  	"github.com/greenplum-db/gp-common-go-libs/testhelper"
    28  	"github.com/greenplum-db/gpbackup/backup"
    29  	"github.com/greenplum-db/gpbackup/filepath"
    30  	"github.com/greenplum-db/gpbackup/testutils"
    31  	"github.com/greenplum-db/gpbackup/toc"
    32  	"github.com/greenplum-db/gpbackup/utils"
    33  	"github.com/pkg/errors"
    34  	"github.com/spf13/pflag"
    35  
    36  	. "github.com/onsi/ginkgo/v2"
    37  	. "github.com/onsi/gomega"
    38  	"github.com/onsi/gomega/format"
    39  	. "github.com/onsi/gomega/gexec"
    40  
    41  	"gopkg.in/yaml.v2"
    42  )
    43  
    44  /* The backup directory must be unique per test. There is test flakiness
    45   * against Data Domain Boost mounted file systems due to how it handles
    46   * directory deletion/creation.
    47   */
    48  var (
    49  	customBackupDir string
    50  
    51  	useOldBackupVersion bool
    52  	oldBackupSemVer     semver.Version
    53  
    54  	backupCluster           *cluster.Cluster
    55  	historyFilePath         string
    56  	saveHistoryFilePath     = "/tmp/end_to_end_save_history_file.yaml"
    57  	testFailure             bool
    58  	backupConn              *dbconn.DBConn
    59  	restoreConn             *dbconn.DBConn
    60  	gpbackupPath            string
    61  	backupHelperPath        string
    62  	restoreHelperPath       string
    63  	gprestorePath           string
    64  	examplePluginDir        string
    65  	examplePluginExec       string
    66  	examplePluginTestConfig = "/tmp/test_example_plugin_config.yaml"
    67  	examplePluginTestDir    = "/tmp/plugin_dest" // hardcoded in example plugin
    68  	publicSchemaTupleCounts map[string]int
    69  	schema2TupleCounts      map[string]int
    70  	backupDir               string
    71  	segmentCount            int
    72  )
    73  
    74  const (
    75  	TOTAL_RELATIONS               = 37
    76  	TOTAL_RELATIONS_AFTER_EXCLUDE = 21
    77  	TOTAL_CREATE_STATEMENTS       = 9
    78  )
    79  
    80  // This function is run automatically by ginkgo before any tests are run.
    81  func init() {
    82  	flag.StringVar(&customBackupDir, "custom_backup_dir", "/tmp",
    83  		"custom_backup_flag for testing against a configurable directory")
    84  }
    85  
    86  /* This function is a helper function to execute gpbackup and return a session
    87   * to allow checking its output.
    88   */
    89  func gpbackup(gpbackupPath string, backupHelperPath string, args ...string) []byte {
    90  	if useOldBackupVersion {
    91  		_ = os.Chdir("..")
    92  		command := exec.Command("make", "install", fmt.Sprintf("helper_path=%s", backupHelperPath))
    93  		mustRunCommand(command)
    94  		_ = os.Chdir("end_to_end")
    95  	}
    96  	args = append([]string{"--verbose", "--dbname", "testdb"}, args...)
    97  	command := exec.Command(gpbackupPath, args...)
    98  	return mustRunCommand(command)
    99  }
   100  
   101  func gprestore(gprestorePath string, restoreHelperPath string, timestamp string, args ...string) []byte {
   102  	if useOldBackupVersion {
   103  		_ = os.Chdir("..")
   104  		command := exec.Command("make", "install",
   105  			fmt.Sprintf("helper_path=%s", restoreHelperPath))
   106  		mustRunCommand(command)
   107  		_ = os.Chdir("end_to_end")
   108  	}
   109  	args = append([]string{"--verbose", "--timestamp", timestamp}, args...)
   110  	command := exec.Command(gprestorePath, args...)
   111  	output := mustRunCommand(command)
   112  	return output
   113  }
   114  
   115  func buildAndInstallBinaries() (string, string, string) {
   116  	_ = os.Chdir("..")
   117  	command := exec.Command("make", "build")
   118  	mustRunCommand(command)
   119  	_ = os.Chdir("end_to_end")
   120  	binDir := fmt.Sprintf("%s/go/bin", operating.System.Getenv("HOME"))
   121  	return fmt.Sprintf("%s/gpbackup", binDir), fmt.Sprintf("%s/gpbackup_helper", binDir), fmt.Sprintf("%s/gprestore", binDir)
   122  }
   123  
   124  func buildOldBinaries(version string) (string, string) {
   125  	_ = os.Chdir("..")
   126  	command := exec.Command("git", "checkout", version, "-f")
   127  	mustRunCommand(command)
   128  	command = exec.Command("dep", "ensure")
   129  	mustRunCommand(command)
   130  	gpbackupOldPath, err := Build("github.com/greenplum-db/gpbackup",
   131  		"-tags", "gpbackup", "-ldflags",
   132  		fmt.Sprintf("-X github.com/greenplum-db/gpbackup/backup.version=%s", version))
   133  	Expect(err).ShouldNot(HaveOccurred())
   134  	gpbackupHelperOldPath, err := Build("github.com/greenplum-db/gpbackup",
   135  		"-tags", "gpbackup_helper", "-ldflags",
   136  		fmt.Sprintf("-X github.com/greenplum-db/gpbackup/helper.version=%s", version))
   137  	Expect(err).ShouldNot(HaveOccurred())
   138  	command = exec.Command("git", "checkout", "-", "-f")
   139  	mustRunCommand(command)
   140  	command = exec.Command("dep", "ensure")
   141  	mustRunCommand(command)
   142  	_ = os.Chdir("end_to_end")
   143  	return gpbackupOldPath, gpbackupHelperOldPath
   144  }
   145  
   146  func assertDataRestored(conn *dbconn.DBConn, tableToTupleCount map[string]int) {
   147  	for tableName, expectedNumTuples := range tableToTupleCount {
   148  		actualTupleCount := dbconn.MustSelectString(conn, fmt.Sprintf("SELECT count(*) AS string FROM %s", tableName))
   149  		if strconv.Itoa(expectedNumTuples) != actualTupleCount {
   150  			Fail(fmt.Sprintf("Expected:\n\t%s rows to have been restored into table %s\nActual:\n\t%s rows were restored", strconv.Itoa(expectedNumTuples), tableName, actualTupleCount))
   151  		}
   152  	}
   153  }
   154  
   155  func checkTableExists(conn *dbconn.DBConn, tableName string) bool {
   156  	var schema, table string
   157  	s := strings.Split(tableName, ".")
   158  	if len(s) == 2 {
   159  		schema, table = s[0], s[1]
   160  	} else if len(s) == 1 {
   161  		schema = "public"
   162  		table = s[0]
   163  	} else {
   164  		Fail(fmt.Sprintf("Table %s is not in a valid format", tableName))
   165  	}
   166  	exists := dbconn.MustSelectString(conn, fmt.Sprintf("SELECT EXISTS (SELECT * FROM pg_tables WHERE schemaname = '%s' AND tablename = '%s') AS string", schema, table))
   167  	return (exists == "true")
   168  }
   169  
   170  func assertTablesRestored(conn *dbconn.DBConn, tables []string) {
   171  	for _, tableName := range tables {
   172  		if !checkTableExists(conn, tableName) {
   173  			Fail(fmt.Sprintf("Table %s does not exist when it should", tableName))
   174  		}
   175  	}
   176  }
   177  
   178  func assertTablesNotRestored(conn *dbconn.DBConn, tables []string) {
   179  	for _, tableName := range tables {
   180  		if checkTableExists(conn, tableName) {
   181  			Fail(fmt.Sprintf("Table %s exists when it should not", tableName))
   182  		}
   183  	}
   184  }
   185  
   186  func unMarshalRowCounts(filepath string) map[string]int {
   187  	rowFile, err := os.Open(filepath)
   188  
   189  	if err != nil {
   190  		Fail(fmt.Sprintf("Failed to open rowcount file: %s. Error: %s", filepath, err.Error()))
   191  	}
   192  	defer rowFile.Close()
   193  
   194  	reader := csv.NewReader(rowFile)
   195  	reader.Comma = '|'
   196  	reader.FieldsPerRecord = -1
   197  	rowData, err := reader.ReadAll()
   198  	if err != nil {
   199  		Fail(fmt.Sprintf("Failed to initialize rowcount reader: %s. Error: %s", filepath, err.Error()))
   200  	}
   201  
   202  	allRecords := make(map[string]int)
   203  	for idx, each := range rowData {
   204  		if idx < 2 || idx == len(rowData)-1 {
   205  			continue
   206  		}
   207  		table_schema := strings.TrimSpace(each[0])
   208  		table_name := strings.TrimSpace(each[1])
   209  		seg_id, _ := strconv.Atoi(strings.TrimSpace(each[2]))
   210  		row_count, _ := strconv.Atoi(strings.TrimSpace(each[3]))
   211  
   212  		recordKey := fmt.Sprintf("%s_%s_%d", table_schema, table_name, seg_id)
   213  		allRecords[recordKey] = row_count
   214  	}
   215  
   216  	return allRecords
   217  }
   218  
   219  func assertSegmentDataRestored(contentID int, tableName string, rows int) {
   220  	segment := backupCluster.ByContent[contentID]
   221  	port := segment[0].Port
   222  	host := segment[0].Hostname
   223  	segConn := testutils.SetupTestDBConnSegment("restoredb", port, host, backupConn.Version)
   224  	defer segConn.Close()
   225  	assertDataRestored(segConn, map[string]int{tableName: rows})
   226  }
   227  
   228  type PGClassStats struct {
   229  	Relpages  int
   230  	Reltuples float32
   231  }
   232  
   233  func assertPGClassStatsRestored(backupConn *dbconn.DBConn, restoreConn *dbconn.DBConn, tableToTupleCount map[string]int) {
   234  	for tableName := range tableToTupleCount {
   235  		backupStats := make([]PGClassStats, 0)
   236  		restoreStats := make([]PGClassStats, 0)
   237  		pgClassStatsQuery := fmt.Sprintf("SELECT relpages, reltuples FROM pg_class WHERE oid='%s'::regclass::oid", tableName)
   238  		backupErr := backupConn.Select(&backupStats, pgClassStatsQuery)
   239  		restoreErr := restoreConn.Select(&restoreStats, pgClassStatsQuery)
   240  		if backupErr != nil {
   241  			Fail(fmt.Sprintf("Unable to get pg_class stats for table '%s' on the backup database", tableName))
   242  		} else if restoreErr != nil {
   243  			Fail(fmt.Sprintf("Unable to get pg_class stats for table '%s' on the restore database: %s", tableName, restoreErr))
   244  		}
   245  
   246  		if backupStats[0].Relpages != restoreStats[0].Relpages && backupStats[0].Reltuples != restoreStats[0].Reltuples {
   247  			Fail(fmt.Sprintf("The pg_class stats for table '%s' do not match: %v != %v", tableName, backupStats, restoreStats))
   248  		}
   249  	}
   250  }
   251  
   252  func assertSchemasExist(conn *dbconn.DBConn, expectedNumSchemas int) {
   253  	countQuery := `SELECT COUNT(n.nspname) FROM pg_catalog.pg_namespace n WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema' ORDER BY 1;`
   254  	actualSchemaCount := dbconn.MustSelectString(conn, countQuery)
   255  	if strconv.Itoa(expectedNumSchemas) != actualSchemaCount {
   256  		Fail(fmt.Sprintf("Expected:\n\t%s schemas to exist in the DB\nActual:\n\t%s schemas are in the DB", strconv.Itoa(expectedNumSchemas), actualSchemaCount))
   257  	}
   258  }
   259  
   260  func assertRelationsCreated(conn *dbconn.DBConn, expectedNumTables int) {
   261  	countQuery := `SELECT count(*) AS string FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('S','v','r','p') AND n.nspname IN ('public', 'schema2');`
   262  	actualTableCount := dbconn.MustSelectString(conn, countQuery)
   263  	if strconv.Itoa(expectedNumTables) != actualTableCount {
   264  		Fail(fmt.Sprintf("Expected:\n\t%s relations to have been created\nActual:\n\t%s relations were created", strconv.Itoa(expectedNumTables), actualTableCount))
   265  	}
   266  }
   267  
   268  func assertRelationsCreatedInSchema(conn *dbconn.DBConn, schema string, expectedNumTables int) {
   269  	countQuery := fmt.Sprintf(`SELECT count(*) AS string FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('S','v','r','p') AND n.nspname = '%s'`, schema)
   270  	actualTableCount := dbconn.MustSelectString(conn, countQuery)
   271  	if strconv.Itoa(expectedNumTables) != actualTableCount {
   272  		Fail(fmt.Sprintf("Expected:\n\t%s relations to have been created\nActual:\n\t%s relations were created", strconv.Itoa(expectedNumTables), actualTableCount))
   273  	}
   274  }
   275  
   276  func assertRelationsExistForIncremental(conn *dbconn.DBConn, expectedNumTables int) {
   277  	countQuery := `SELECT count(*) AS string FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('S','v','r','p') AND n.nspname IN ('old_schema', 'new_schema');`
   278  	actualTableCount := dbconn.MustSelectString(conn, countQuery)
   279  	if strconv.Itoa(expectedNumTables) != actualTableCount {
   280  		Fail(fmt.Sprintf("Expected:\n\t%s relations to exist in old_schema and new_schema\nActual:\n\t%s relations are present", strconv.Itoa(expectedNumTables), actualTableCount))
   281  	}
   282  }
   283  
   284  func assertArtifactsCleaned(timestamp string) {
   285  	cmdStr := fmt.Sprintf("ps -ef | grep -v grep | grep -E gpbackup_helper.*%s || true", timestamp)
   286  	output := mustRunCommand(exec.Command("bash", "-c", cmdStr))
   287  	Eventually(func() string { return strings.TrimSpace(string(output)) }, 30*time.Second, 100*time.Millisecond).Should(Equal(""))
   288  
   289  	fpInfo := filepath.NewFilePathInfo(backupCluster, "", timestamp, "", false)
   290  	description := "Checking if helper files are cleaned up properly"
   291  	cleanupFunc := func(contentID int) string {
   292  		errorFile := fmt.Sprintf("%s_error", fpInfo.GetSegmentPipeFilePath(contentID))
   293  		oidFile := fpInfo.GetSegmentHelperFilePath(contentID, "oid")
   294  		scriptFile := fpInfo.GetSegmentHelperFilePath(contentID, "script")
   295  		pipeFile := fpInfo.GetSegmentPipeFilePath(contentID)
   296  
   297  		return fmt.Sprintf("! ls %s && ! ls %s && ! ls %s && ! ls %s*", errorFile, oidFile, scriptFile, pipeFile)
   298  	}
   299  	remoteOutput := backupCluster.GenerateAndExecuteCommand(description, cluster.ON_SEGMENTS|cluster.INCLUDE_COORDINATOR, cleanupFunc)
   300  	if remoteOutput.NumErrors != 0 {
   301  		Fail(fmt.Sprintf("Helper files found for timestamp %s", timestamp))
   302  	}
   303  }
   304  
   305  func mustRunCommand(cmd *exec.Cmd) []byte {
   306  	output, err := cmd.CombinedOutput()
   307  	if err != nil {
   308  		testFailure = true
   309  		fmt.Printf("%s", output)
   310  		Fail(fmt.Sprintf("%v", err))
   311  	}
   312  	return output
   313  }
   314  
   315  func skipIfOldBackupVersionBefore(version string) {
   316  	if useOldBackupVersion && oldBackupSemVer.LT(semver.MustParse(version)) {
   317  		Skip(fmt.Sprintf("Feature not supported in gpbackup %s", oldBackupSemVer))
   318  	}
   319  }
   320  
   321  func createGlobalObjects(conn *dbconn.DBConn) {
   322  	if conn.Version.Before("6") {
   323  		testhelper.AssertQueryRuns(conn, "CREATE TABLESPACE test_tablespace FILESPACE test_dir")
   324  	} else {
   325  		testhelper.AssertQueryRuns(conn, "CREATE TABLESPACE test_tablespace LOCATION '/tmp/test_dir';")
   326  	}
   327  	testhelper.AssertQueryRuns(conn, "CREATE RESOURCE QUEUE test_queue WITH (ACTIVE_STATEMENTS=5);")
   328  	testhelper.AssertQueryRuns(conn, "CREATE ROLE global_role RESOURCE QUEUE test_queue;")
   329  	testhelper.AssertQueryRuns(conn, "CREATE ROLE testrole;")
   330  	testhelper.AssertQueryRuns(conn, "GRANT testrole TO global_role;")
   331  	testhelper.AssertQueryRuns(conn, "CREATE DATABASE global_db TABLESPACE test_tablespace;")
   332  	testhelper.AssertQueryRuns(conn, "ALTER DATABASE global_db OWNER TO global_role;")
   333  	testhelper.AssertQueryRuns(conn, "ALTER ROLE global_role SET search_path TO public,pg_catalog;")
   334  	if conn.Version.Is("5") || conn.Version.Is("6") {
   335  		testhelper.AssertQueryRuns(conn, "CREATE RESOURCE GROUP test_group WITH (CPU_RATE_LIMIT=1, MEMORY_LIMIT=1);")
   336  	} else if conn.Version.AtLeast("7") {
   337  		testhelper.AssertQueryRuns(conn, "CREATE RESOURCE GROUP test_group WITH (CPU_MAX_PERCENT=1, MEMORY_LIMIT=1);")
   338  	}
   339  
   340  	testhelper.AssertQueryRuns(conn, "ALTER ROLE global_role RESOURCE GROUP test_group;")
   341  }
   342  
   343  func dropGlobalObjects(conn *dbconn.DBConn, dbExists bool) {
   344  	if dbExists {
   345  		testhelper.AssertQueryRuns(conn, "DROP DATABASE global_db;")
   346  	}
   347  	testhelper.AssertQueryRuns(conn, "DROP TABLESPACE test_tablespace;")
   348  	testhelper.AssertQueryRuns(conn, "DROP ROLE global_role;")
   349  	testhelper.AssertQueryRuns(conn, "DROP ROLE testrole;")
   350  	testhelper.AssertQueryRuns(conn, "DROP RESOURCE QUEUE test_queue;")
   351  	testhelper.AssertQueryRuns(conn, "DROP RESOURCE GROUP test_group;")
   352  }
   353  
   354  // fileSuffix should be one of: config.yaml, metadata.sql, toc.yaml, or report
   355  func getMetdataFileContents(backupDir string, timestamp string, fileSuffix string) []byte {
   356  	filePath := path.Join("backups", timestamp[:8], timestamp, fmt.Sprintf("gpbackup_%s_%s", timestamp, fileSuffix))
   357  	if _, err := os.Stat(path.Join(backupDir, "backups")); err != nil && os.IsNotExist(err) {
   358  		filePath = path.Join("*-1", filePath)
   359  	}
   360  	file, err := path.Glob(path.Join(backupDir, filePath))
   361  	Expect(err).ToNot(HaveOccurred())
   362  	fileContentBytes, err := ioutil.ReadFile(file[0])
   363  	Expect(err).ToNot(HaveOccurred())
   364  
   365  	return fileContentBytes
   366  }
   367  
   368  // check restore file exist and has right permissions
   369  func checkRestoreMetadataFile(backupDir string, timestamp string, fileSuffix string, withHierarchy bool) {
   370  	filePath := fmt.Sprintf("gprestore_%s_*_%s", timestamp, fileSuffix)
   371  	if withHierarchy {
   372  		filePath = path.Join("backups", timestamp[:8], timestamp, filePath)
   373  		if _, err := os.Stat(path.Join(backupDir, "backups")); err != nil && os.IsNotExist(err) {
   374  			filePath = path.Join("*-1", filePath)
   375  		}
   376  	}
   377  	file, err := path.Glob(path.Join(backupDir, filePath))
   378  	Expect(err).ToNot(HaveOccurred())
   379  	Expect(file).To(HaveLen(1))
   380  	info, err := os.Stat(file[0])
   381  	Expect(err).ToNot(HaveOccurred())
   382  	if info.Mode() != 0444 {
   383  		Fail(fmt.Sprintf("File %s is not read-only (mode is %v).", file[0], info.Mode()))
   384  	}
   385  }
   386  
   387  func saveHistory(myCluster *cluster.Cluster) {
   388  	// move history file out of the way, and replace in "after". This avoids adding junk to an existing gpackup_history.db
   389  
   390  	mdd := myCluster.GetDirForContent(-1)
   391  	historyFilePath = path.Join(mdd, "gpbackup_history.db")
   392  	_ = utils.CopyFile(historyFilePath, saveHistoryFilePath)
   393  }
   394  
   395  // Parse backup timestamp from gpbackup log string
   396  func getBackupTimestamp(output string) string {
   397  	r := regexp.MustCompile(`Backup Timestamp = (\d{14})`)
   398  	matches := r.FindStringSubmatch(output)
   399  	if len(matches) < 2 {
   400  		return ""
   401  	} else {
   402  		return matches[1]
   403  	}
   404  }
   405  
   406  // Helper function to extract saved backups and check that their permissions are correctly set
   407  // Leaving the coordinator metadata files read-only causes problems with DataDomain tests,
   408  // so developers should make them writable before saving a backup, and this check ensures
   409  // that that is caught during local testing.
   410  func extractSavedTarFile(backupDir string, tarBaseName string) string {
   411  	extractDirectory := path.Join(backupDir, tarBaseName)
   412  	os.Mkdir(extractDirectory, 0777)
   413  	command := exec.Command("tar", "-xzf", fmt.Sprintf("resources/%s.tar.gz", tarBaseName), "-C", extractDirectory)
   414  	mustRunCommand(command)
   415  
   416  	defer GinkgoRecover() // needed if calling Fail in a function such as Walk that uses goroutines
   417  
   418  	// Traverse the master data directory and check that the user write bit is set for all files
   419  	path.Walk(fmt.Sprintf("%s/demoDataDir-1", extractDirectory), func(p string, info fs.FileInfo, err error) error {
   420  		if info != nil && !info.IsDir() && info.Mode()&0200 != 0200 {
   421  			Fail(fmt.Sprintf("File %s is not user-writable (mode is %v); please make it writable before checking in this tar file.", p, info.Mode()))
   422  		}
   423  		return path.SkipDir
   424  	})
   425  
   426  	return extractDirectory
   427  }
   428  
   429  // Move extracted data files to the proper directory for a larger-to-smaller restore, if necessary
   430  // Assumes all saved backups have a name in the format "N-segment-db-..." where N is the original cluster size
   431  func moveSegmentBackupFiles(tarBaseName string, extractDirectory string, isMultiNode bool, timestamps ...string) {
   432  	re := regexp.MustCompile("^([0-9]+)-.*")
   433  	origSize, _ := strconv.Atoi(re.FindStringSubmatch(tarBaseName)[1])
   434  	for _, ts := range timestamps {
   435  		if ts != "" {
   436  			baseDir := fmt.Sprintf("%s/demoDataDir%s/backups/%s/%s", extractDirectory, "%d", ts[0:8], ts)
   437  			if isMultiNode {
   438  				remoteOutput := backupCluster.GenerateAndExecuteCommand("Create backup directories on segments", cluster.ON_SEGMENTS, func(contentID int) string {
   439  					return fmt.Sprintf("mkdir -p %s", fmt.Sprintf(baseDir, contentID))
   440  				})
   441  				backupCluster.CheckClusterError(remoteOutput, "Unable to create directories", func(contentID int) string {
   442  					return ""
   443  				})
   444  				for i := 0; i < origSize; i++ {
   445  					origDir := fmt.Sprintf(baseDir, i)
   446  					destDir := fmt.Sprintf(baseDir, i%segmentCount)
   447  					_, err := backupCluster.ExecuteLocalCommand(fmt.Sprintf(`rsync -r -e ssh %s/ %s:%s`, origDir, backupCluster.GetHostForContent(i%segmentCount), destDir))
   448  					if err != nil {
   449  						Fail(fmt.Sprintf("Could not copy %s to %s: %v", origDir, destDir, err))
   450  					}
   451  				}
   452  			} else {
   453  				for i := segmentCount; i < origSize; i++ {
   454  					origDir := fmt.Sprintf(baseDir, i)
   455  					destDir := fmt.Sprintf(baseDir, i%segmentCount)
   456  					files, _ := path.Glob(fmt.Sprintf("%s/*", origDir))
   457  					for _, dataFile := range files {
   458  						os.Rename(dataFile, fmt.Sprintf("%s/%s", destDir, path.Base(dataFile)))
   459  					}
   460  				}
   461  			}
   462  		}
   463  	}
   464  }
   465  
   466  func TestEndToEnd(t *testing.T) {
   467  	format.MaxLength = 0
   468  	RegisterFailHandler(Fail)
   469  	RunSpecs(t, "EndToEnd Suite")
   470  }
   471  
   472  var _ = BeforeSuite(func() {
   473  	// This is used to run tests from an older gpbackup version to gprestore latest
   474  	useOldBackupVersion = os.Getenv("OLD_BACKUP_VERSION") != ""
   475  
   476  	// Setup example plugin based on current working directory
   477  	err := os.RemoveAll(examplePluginTestDir)
   478  	Expect(err).ToNot(HaveOccurred())
   479  	err = os.MkdirAll(examplePluginTestDir, 0777)
   480  	Expect(err).ToNot(HaveOccurred())
   481  	currentDir, err := os.Getwd()
   482  	Expect(err).ToNot(HaveOccurred())
   483  	rootDir := path.Dir(currentDir)
   484  	examplePluginDir = path.Join(rootDir, "plugins")
   485  	examplePluginExec = path.Join(rootDir, "plugins", "example_plugin.bash")
   486  	examplePluginTestConfigContents := fmt.Sprintf(`executablepath: %s
   487  options:
   488    password: unknown`, examplePluginExec)
   489  	f, err := os.Create(examplePluginTestConfig)
   490  	Expect(err).ToNot(HaveOccurred())
   491  	_, err = f.WriteString(examplePluginTestConfigContents)
   492  	Expect(err).ToNot(HaveOccurred())
   493  	err = f.Close()
   494  	Expect(err).ToNot(HaveOccurred())
   495  
   496  	testhelper.SetupTestLogger()
   497  	_ = exec.Command("dropdb", "testdb").Run()
   498  	_ = exec.Command("dropdb", "restoredb").Run()
   499  	_ = exec.Command("psql", "postgres",
   500  		"-c", "DROP RESOURCE QUEUE test_queue").Run()
   501  
   502  	err = exec.Command("createdb", "testdb").Run()
   503  	if err != nil {
   504  		Fail(fmt.Sprintf("Could not create testdb: %v", err))
   505  	}
   506  	err = exec.Command("createdb", "restoredb").Run()
   507  	if err != nil {
   508  		Fail(fmt.Sprintf("Could not create restoredb: %v", err))
   509  	}
   510  	backupConn = testutils.SetupTestDbConn("testdb")
   511  	restoreConn = testutils.SetupTestDbConn("restoredb")
   512  	backupCmdFlags := pflag.NewFlagSet("gpbackup", pflag.ExitOnError)
   513  	backup.SetCmdFlags(backupCmdFlags)
   514  	backup.InitializeMetadataParams(backupConn)
   515  	backup.SetFilterRelationClause("")
   516  	testutils.ExecuteSQLFile(backupConn, "resources/test_tables_ddl.sql")
   517  	testutils.ExecuteSQLFile(backupConn, "resources/test_tables_data.sql")
   518  
   519  	// default GUC setting varies between versions so set it explicitly
   520  	testhelper.AssertQueryRuns(backupConn, "SET gp_autostats_mode='on_no_stats'")
   521  
   522  	if useOldBackupVersion {
   523  		oldBackupSemVer = semver.MustParse(os.Getenv("OLD_BACKUP_VERSION"))
   524  		oldBackupVersionStr := os.Getenv("OLD_BACKUP_VERSION")
   525  
   526  		_, restoreHelperPath, gprestorePath = buildAndInstallBinaries()
   527  
   528  		// Precompiled binaries will exist when running the ci job, `backward-compatibility`
   529  		if _, err := os.Stat(fmt.Sprintf("/tmp/%s", oldBackupVersionStr)); err == nil {
   530  			gpbackupPath = path.Join("/tmp", oldBackupVersionStr, "gpbackup")
   531  			backupHelperPath = path.Join("/tmp", oldBackupVersionStr, "gpbackup_helper")
   532  		} else {
   533  			gpbackupPath, backupHelperPath = buildOldBinaries(oldBackupVersionStr)
   534  		}
   535  	} else {
   536  		// Check if gpbackup binary has been installed using gppkg
   537  		gpHomeDir := operating.System.Getenv("GPHOME")
   538  		binDir := fmt.Sprintf("%s/go/bin", operating.System.Getenv("HOME"))
   539  		if _, err := os.Stat(fmt.Sprintf("%s/bin/gpbackup", gpHomeDir)); err == nil {
   540  			binDir = fmt.Sprintf("%s/bin", gpHomeDir)
   541  		}
   542  
   543  		gpbackupPath = fmt.Sprintf("%s/gpbackup", binDir)
   544  		gprestorePath = fmt.Sprintf("%s/gprestore", binDir)
   545  		backupHelperPath = fmt.Sprintf("%s/gpbackup_helper", binDir)
   546  		restoreHelperPath = backupHelperPath
   547  	}
   548  	segConfig := cluster.MustGetSegmentConfiguration(backupConn)
   549  	backupCluster = cluster.NewCluster(segConfig)
   550  
   551  	if backupConn.Version.Before("6") {
   552  		testutils.SetupTestFilespace(backupConn, backupCluster)
   553  	} else {
   554  		remoteOutput := backupCluster.GenerateAndExecuteCommand(
   555  			"Creating filespace test directories on all hosts",
   556  			cluster.ON_HOSTS|cluster.INCLUDE_COORDINATOR,
   557  			func(contentID int) string {
   558  				return fmt.Sprintf("mkdir -p /tmp/test_dir && mkdir -p /tmp/test_dir1 && mkdir -p /tmp/test_dir2")
   559  			})
   560  		if remoteOutput.NumErrors != 0 {
   561  			Fail("Could not create filespace test directory on 1 or more hosts")
   562  		}
   563  	}
   564  
   565  	saveHistory(backupCluster)
   566  
   567  	err = os.MkdirAll(customBackupDir, 0777)
   568  	if err != nil {
   569  		Fail(fmt.Sprintf("Failed to create directory: %s. Error: %s", customBackupDir, err.Error()))
   570  	}
   571  	// Flag validation
   572  	_, err = os.Stat(customBackupDir)
   573  	if os.IsNotExist(err) {
   574  		Fail(fmt.Sprintf("Custom backup directory %s does not exist.", customBackupDir))
   575  	}
   576  	// capture cluster size for resize tests
   577  	segmentCount = len(backupCluster.Segments) - 1
   578  
   579  })
   580  
   581  var _ = AfterSuite(func() {
   582  	if testFailure {
   583  		return
   584  	}
   585  	_ = utils.CopyFile(saveHistoryFilePath, historyFilePath)
   586  
   587  	if backupConn.Version.Before("6") {
   588  		testutils.DestroyTestFilespace(backupConn)
   589  	} else {
   590  		_ = exec.Command("psql", "postgres",
   591  			"-c", "DROP RESOURCE QUEUE test_queue").Run()
   592  		_ = exec.Command("psql", "postgres",
   593  			"-c", "DROP TABLESPACE test_tablespace").Run()
   594  		remoteOutput := backupCluster.GenerateAndExecuteCommand(
   595  			"Removing /tmp/test_dir* directories on all hosts",
   596  			cluster.ON_HOSTS|cluster.INCLUDE_COORDINATOR,
   597  			func(contentID int) string {
   598  				return fmt.Sprintf("rm -rf /tmp/test_dir*")
   599  			})
   600  		if remoteOutput.NumErrors != 0 {
   601  			Fail("Could not remove /tmp/testdir* directories on 1 or more hosts")
   602  		}
   603  	}
   604  	if backupConn != nil {
   605  		backupConn.Close()
   606  	}
   607  	if restoreConn != nil {
   608  		restoreConn.Close()
   609  	}
   610  	CleanupBuildArtifacts()
   611  	err := exec.Command("dropdb", "testdb").Run()
   612  	if err != nil {
   613  		fmt.Printf("Could not drop testdb: %v\n", err)
   614  	}
   615  	err = exec.Command("dropdb", "restoredb").Run()
   616  	if err != nil {
   617  		fmt.Printf("Could not drop restoredb: %v\n", err)
   618  	}
   619  })
   620  
   621  func end_to_end_setup() {
   622  	testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA IF EXISTS schema2 CASCADE; DROP SCHEMA public CASCADE; CREATE SCHEMA public;")
   623  
   624  	// Try to drop some objects that test failures might leave lying around
   625  	// We can't use AssertQueryRuns since if an object doesn't exist it will error out, and these objects don't have IF EXISTS as an option
   626  	backupConn.Exec("DROP ROLE testrole; DROP ROLE global_role; DROP RESOURCE QUEUE test_queue; DROP RESOURCE GROUP rg_test_group; DROP TABLESPACE test_tablespace;")
   627  	restoreConn.Exec("DROP ROLE testrole; DROP ROLE global_role; DROP RESOURCE QUEUE test_queue; DROP RESOURCE GROUP rg_test_group; DROP TABLESPACE test_tablespace;")
   628  	if backupConn.Version.AtLeast("6") {
   629  		backupConn.Exec("DROP FOREIGN DATA WRAPPER fdw CASCADE;")
   630  		restoreConn.Exec("DROP FOREIGN DATA WRAPPER fdw CASCADE;")
   631  	}
   632  	// The gp_toolkit extension should be created automatically, but in some cases it either isn't
   633  	// being created or is being dropped, so for now we explicitly create it to avoid spurious failures.
   634  	// TODO: Track down the cause of the issue so we don't need to manually create it.
   635  	if backupConn.Version.AtLeast("7") {
   636  		backupConn.Exec("CREATE EXTENSION gp_toolkit;")
   637  		restoreConn.Exec("CREATE EXTENSION gp_toolkit;")
   638  	}
   639  
   640  	publicSchemaTupleCounts = map[string]int{
   641  		"public.foo":   40000,
   642  		"public.holds": 50000,
   643  		"public.sales": 13,
   644  	}
   645  	schema2TupleCounts = map[string]int{
   646  		"schema2.returns": 6,
   647  		"schema2.foo2":    0,
   648  		"schema2.foo3":    100,
   649  		"schema2.ao1":     1000,
   650  		"schema2.ao2":     1000,
   651  	}
   652  
   653  	// note that BeforeSuite has saved off history file, in case of running on
   654  	// workstation where we want to retain normal (non-test?) history
   655  	// we remove in order to work around an old common-library bug in closing a
   656  	// file after writing, and truncating when opening to write, both of which
   657  	// manifest as a broken history file in old code
   658  	_ = os.Remove(historyFilePath)
   659  
   660  	// Assign a unique directory for each test
   661  	backupDir, _ = ioutil.TempDir(customBackupDir, "temp")
   662  }
   663  
   664  func end_to_end_teardown() {
   665  	_ = os.RemoveAll(backupDir)
   666  }
   667  
   668  var _ = Describe("backup and restore end to end tests", func() {
   669  	BeforeEach(func() {
   670  		end_to_end_setup()
   671  	})
   672  	AfterEach(func() {
   673  		end_to_end_teardown()
   674  	})
   675  
   676  	Describe("globals tests", func() {
   677  		It("runs gpbackup and gprestore with --with-globals", func() {
   678  			skipIfOldBackupVersionBefore("1.8.2")
   679  			createGlobalObjects(backupConn)
   680  
   681  			output := gpbackup(gpbackupPath, backupHelperPath)
   682  			timestamp := getBackupTimestamp(string(output))
   683  
   684  			dropGlobalObjects(backupConn, true)
   685  			defer dropGlobalObjects(backupConn, false)
   686  
   687  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   688  				"--redirect-db", "restoredb",
   689  				"--with-globals")
   690  		})
   691  		It("runs gpbackup and gprestore with --with-globals and --create-db", func() {
   692  			skipIfOldBackupVersionBefore("1.8.2")
   693  			createGlobalObjects(backupConn)
   694  			if backupConn.Version.AtLeast("6") {
   695  				testhelper.AssertQueryRuns(backupConn,
   696  					"ALTER ROLE global_role IN DATABASE global_db SET search_path TO public,pg_catalog;")
   697  			}
   698  
   699  			output := gpbackup(gpbackupPath, backupHelperPath)
   700  			timestamp := getBackupTimestamp(string(output))
   701  
   702  			dropGlobalObjects(backupConn, true)
   703  			defer dropGlobalObjects(backupConn, true)
   704  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   705  				"--redirect-db", "global_db",
   706  				"--with-globals",
   707  				"--create-db")
   708  		})
   709  		It("runs gpbackup with --without-globals", func() {
   710  			skipIfOldBackupVersionBefore("1.18.0")
   711  			createGlobalObjects(backupConn)
   712  			defer dropGlobalObjects(backupConn, true)
   713  
   714  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--without-globals")
   715  			timestamp := getBackupTimestamp(string(output))
   716  
   717  			configFileContents := getMetdataFileContents(backupDir, timestamp, "config.yaml")
   718  			Expect(string(configFileContents)).To(ContainSubstring("withoutglobals: true"))
   719  
   720  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
   721  			Expect(string(metadataFileContents)).ToNot(ContainSubstring("CREATE ROLE testrole"))
   722  
   723  			tocFileContents := getMetdataFileContents(backupDir, timestamp, "toc.yaml")
   724  			tocStruct := &toc.TOC{}
   725  			err := yaml.Unmarshal(tocFileContents, tocStruct)
   726  			Expect(err).ToNot(HaveOccurred())
   727  			Expect(len(tocStruct.GlobalEntries)).To(Equal(1))
   728  			Expect(tocStruct.GlobalEntries[0].ObjectType).To(Equal(toc.OBJ_SESSION_GUC))
   729  		})
   730  		It("runs gpbackup with --without-globals and --metadata-only", func() {
   731  			skipIfOldBackupVersionBefore("1.18.0")
   732  			createGlobalObjects(backupConn)
   733  			defer dropGlobalObjects(backupConn, true)
   734  
   735  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--without-globals", "--metadata-only")
   736  			timestamp := getBackupTimestamp(string(output))
   737  
   738  			configFileContents := getMetdataFileContents(backupDir, timestamp, "config.yaml")
   739  			Expect(string(configFileContents)).To(ContainSubstring("withoutglobals: true"))
   740  
   741  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
   742  			Expect(string(metadataFileContents)).ToNot(ContainSubstring("CREATE ROLE testrole"))
   743  
   744  			tocFileContents := getMetdataFileContents(backupDir, timestamp, "toc.yaml")
   745  			tocStruct := &toc.TOC{}
   746  			err := yaml.Unmarshal(tocFileContents, tocStruct)
   747  			Expect(err).ToNot(HaveOccurred())
   748  			Expect(len(tocStruct.GlobalEntries)).To(Equal(1))
   749  			Expect(tocStruct.GlobalEntries[0].ObjectType).To(Equal(toc.OBJ_SESSION_GUC))
   750  		})
   751  	})
   752  	Describe(`On Error Continue`, func() {
   753  		It(`gprestore continues when encountering errors during data load with --single-data-file and --on-error-continue`, func() {
   754  			if segmentCount != 3 {
   755  				Skip("Restoring from a tarred backup currently requires a 3-segment cluster to test.")
   756  			}
   757  
   758  			// This backup is corrupt because the data for a single row on
   759  			// segment0 was changed so that the value stored in the row is
   760  			// 9 instead of 1.  This will cause an issue when COPY FROM
   761  			// attempts to restore this data because it will error out
   762  			// stating it belongs to a different segment. This backup was
   763  			// taken with gpbackup version 1.12.1 and GPDB version 4.3.33.2
   764  
   765  			extractDirectory := extractSavedTarFile(backupDir, "corrupt-db")
   766  
   767  			gprestoreCmd := exec.Command(gprestorePath,
   768  				"--timestamp", "20190809230424",
   769  				"--redirect-db", "restoredb",
   770  				"--backup-dir", extractDirectory,
   771  				"--on-error-continue",
   772  				"--verbose")
   773  			_, err := gprestoreCmd.CombinedOutput()
   774  			Expect(err).To(HaveOccurred())
   775  
   776  			assertRelationsCreated(restoreConn, 3)
   777  			// Expect corrupt_table to have 0 tuples because data load should have failed due violation of distribution key constraint.
   778  			assertDataRestored(restoreConn, map[string]int{
   779  				"public.corrupt_table": 0,
   780  				"public.good_table1":   10,
   781  				"public.good_table2":   10})
   782  		})
   783  		It(`Creates skip file on segments for corrupted table for helpers to discover the file and skip it with --single-data-file and --on-error-continue`, func() {
   784  			if useOldBackupVersion {
   785  				Skip("This test is not needed for old backup versions")
   786  			} else if restoreConn.Version.Before("6") {
   787  				Skip("This test does not apply to GPDB versions before 6X")
   788  			} else if segmentCount != 3 {
   789  				Skip("Restoring from a tarred backup currently requires a 3-segment cluster to test.")
   790  			}
   791  
   792  			extractDirectory := extractSavedTarFile(backupDir, "corrupt-db")
   793  
   794  			testhelper.AssertQueryRuns(restoreConn,
   795  				"CREATE TABLE public.corrupt_table (i integer);")
   796  			defer testhelper.AssertQueryRuns(restoreConn,
   797  				"DROP TABLE public.corrupt_table")
   798  
   799  			// we know that broken value goes to seg2, so seg1 should be
   800  			// ok. Connect in utility mode to seg1.
   801  			segmentOne := backupCluster.ByContent[1]
   802  			port := segmentOne[0].Port
   803  			segConn := testutils.SetupTestDBConnSegment("restoredb", port, "", backupConn.Version)
   804  			defer segConn.Close()
   805  
   806  			// Take ACCESS EXCLUSIVE LOCK on public.corrupt_table which will
   807  			// make COPY on seg1 block until the lock is released. By that
   808  			// time, COPY on seg2 will fail and gprestore will create a skip
   809  			// file for public.corrupt_table. When the lock is released on seg1,
   810  			// the restore helper should discover the file and skip the table.
   811  			segConn.Begin(0)
   812  			segConn.Exec("LOCK TABLE public.corrupt_table IN ACCESS EXCLUSIVE MODE;")
   813  
   814  			gprestoreCmd := exec.Command(gprestorePath,
   815  				"--timestamp", "20190809230424",
   816  				"--redirect-db", "restoredb",
   817  				"--backup-dir", extractDirectory,
   818  				"--data-only", "--on-error-continue",
   819  				"--include-table", "public.corrupt_table",
   820  				"--verbose")
   821  			_, err := gprestoreCmd.CombinedOutput()
   822  			Expect(err).To(HaveOccurred())
   823  
   824  			segConn.Commit(0)
   825  			homeDir := os.Getenv("HOME")
   826  			helperLogs, _ := path.Glob(path.Join(homeDir, "gpAdminLogs/gprestore_*"))
   827  			cmdStr := fmt.Sprintf("tail -n 40 %s | grep \"Creating skip file\" || true", helperLogs[len(helperLogs)-1])
   828  
   829  			attemts := 1000
   830  			err = errors.New("Timeout to discover skip file")
   831  			for attemts > 0 {
   832  				output := mustRunCommand(exec.Command("bash", "-c", cmdStr))
   833  				if strings.TrimSpace(string(output)) == "" {
   834  					time.Sleep(5 * time.Millisecond)
   835  					attemts--
   836  				} else {
   837  					err = nil
   838  					break
   839  				}
   840  			}
   841  			Expect(err).NotTo(HaveOccurred())
   842  		})
   843  		It(`ensures gprestore on corrupt backup with --on-error-continue logs error tables`, func() {
   844  			if segmentCount != 3 {
   845  				Skip("Restoring from a tarred backup currently requires a 3-segment cluster to test.")
   846  			}
   847  			extractDirectory := extractSavedTarFile(backupDir, "corrupt-db")
   848  
   849  			// Restore command with data error
   850  			// Metadata errors due to invalid alter ownership
   851  			expectedErrorTablesData := []string{"public.corrupt_table"}
   852  			expectedErrorTablesMetadata := []string{
   853  				"public.corrupt_table", "public.good_table1", "public.good_table2"}
   854  			gprestoreCmd := exec.Command(gprestorePath,
   855  				"--timestamp", "20190809230424",
   856  				"--redirect-db", "restoredb",
   857  				"--backup-dir", extractDirectory,
   858  				"--on-error-continue",
   859  				"--verbose")
   860  			_, _ = gprestoreCmd.CombinedOutput()
   861  
   862  			files, _ := path.Glob(path.Join(extractDirectory, "/*-1/backups/*",
   863  				"20190809230424", "*error_tables*"))
   864  			Expect(files).To(HaveLen(2))
   865  
   866  			Expect(files[0]).To(HaveSuffix("_data"))
   867  			contents, err := ioutil.ReadFile(files[0])
   868  			Expect(err).ToNot(HaveOccurred())
   869  			tables := strings.Split(string(contents), "\n")
   870  			Expect(tables).To(Equal(expectedErrorTablesData))
   871  			_ = os.Remove(files[0])
   872  
   873  			Expect(files).To(HaveLen(2))
   874  			Expect(files[1]).To(HaveSuffix("_metadata"))
   875  			contents, err = ioutil.ReadFile(files[1])
   876  			Expect(err).ToNot(HaveOccurred())
   877  			tables = strings.Split(string(contents), "\n")
   878  			sort.Strings(tables)
   879  			Expect(tables).To(Equal(expectedErrorTablesMetadata))
   880  			_ = os.Remove(files[1])
   881  
   882  			// Restore command with tables containing multiple metadata errors
   883  			// This test is to ensure we don't have tables with multiple errors show up twice
   884  			gprestoreCmd = exec.Command(gprestorePath,
   885  				"--timestamp", "20190809230424",
   886  				"--redirect-db", "restoredb",
   887  				"--backup-dir", path.Join(backupDir, "corrupt-db"),
   888  				"--metadata-only",
   889  				"--on-error-continue",
   890  				"--verbose")
   891  			_, _ = gprestoreCmd.CombinedOutput()
   892  			expectedErrorTablesMetadata = []string{
   893  				"public.corrupt_table", "public.good_table1", "public.good_table2"}
   894  			files, _ = path.Glob(path.Join(backupDir, "/corrupt-db/",
   895  				"*-1/backups/*", "20190809230424", "*error_tables*"))
   896  			Expect(files).To(HaveLen(1))
   897  			Expect(files[0]).To(HaveSuffix("_metadata"))
   898  			contents, err = ioutil.ReadFile(files[0])
   899  			Expect(err).ToNot(HaveOccurred())
   900  			tables = strings.Split(string(contents), "\n")
   901  			sort.Strings(tables)
   902  			Expect(tables).To(HaveLen(len(expectedErrorTablesMetadata)))
   903  			_ = os.Remove(files[0])
   904  		})
   905  		It(`ensures gprestore of corrupt backup with --on-error-continue only logs tables in error_tables_metadata file`, func() {
   906  			if segmentCount != 3 {
   907  				Skip("Restoring from a tarred backup currently requires a 3-segment cluster to test.")
   908  			}
   909  			// The functionality works on 5, but it's currently difficult to make a saved backup on a 5 cluster.
   910  			// TODO: Remove this and re-do the saved backup once we get local 5 testing working again.
   911  			testutils.SkipIfBefore6(backupConn)
   912  
   913  			// This backup is corrupt because the CREATE statement for corrupt_type
   914  			// was changed to substitute "NULL" for "text", causing the statement
   915  			// to error out and also preventing corrupt_table from being created
   916  			// because the type does not exist.  The backup was taken with gpbackup
   917  			// version 1.29.1 and GPDB version 6.23.2.
   918  			extractDirectory := extractSavedTarFile(backupDir, "corrupt-metadata-db")
   919  
   920  			expectedErrorTablesMetadata := []string{"public.corrupt_table"}
   921  			gprestoreCmd := exec.Command(gprestorePath,
   922  				"--timestamp", "20230727021246",
   923  				"--redirect-db", "restoredb",
   924  				"--backup-dir", extractDirectory,
   925  				"--on-error-continue",
   926  				"--verbose")
   927  			_, _ = gprestoreCmd.CombinedOutput()
   928  
   929  			files, _ := path.Glob(path.Join(extractDirectory, "/*-1/backups/*", "20230727021246", "*error_tables*"))
   930  			Expect(files).To(HaveLen(1))
   931  
   932  			Expect(files[0]).To(HaveSuffix("_metadata"))
   933  			contents, err := ioutil.ReadFile(files[0])
   934  			Expect(err).ToNot(HaveOccurred())
   935  			tables := strings.Split(string(contents), "\n")
   936  			Expect(tables).To(Equal(expectedErrorTablesMetadata))
   937  			_ = os.Remove(files[0])
   938  		})
   939  		It(`ensures successful gprestore with --on-error-continue does not log error tables`, func() {
   940  			// Ensure no error tables with successful restore
   941  			output := gpbackup(gpbackupPath, backupHelperPath,
   942  				"--no-compression",
   943  				"--backup-dir", backupDir)
   944  			timestamp := getBackupTimestamp(string(output))
   945  
   946  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   947  				"--redirect-db", "restoredb",
   948  				"--backup-dir", backupDir,
   949  				"--on-error-continue")
   950  			errorFilePath := path.Join(backupDir, "backups/*", timestamp, "_error_tables")
   951  			if _, err := os.Stat(path.Join(backupDir, "backups")); err != nil && os.IsNotExist(err) {
   952  				errorFilePath = path.Join(backupDir, "*-1/backups/*", timestamp, "_error_tables")
   953  			}
   954  			files, err := path.Glob(errorFilePath)
   955  			Expect(err).ToNot(HaveOccurred())
   956  			Expect(files).To(HaveLen(0))
   957  		})
   958  	})
   959  	Describe("Redirect Schema", func() {
   960  		It("runs gprestore with --redirect-schema restoring data and statistics to the new schema", func() {
   961  			skipIfOldBackupVersionBefore("1.17.0")
   962  			testhelper.AssertQueryRuns(restoreConn,
   963  				"DROP SCHEMA IF EXISTS schema3 CASCADE; CREATE SCHEMA schema3;")
   964  			defer testhelper.AssertQueryRuns(restoreConn,
   965  				"DROP SCHEMA schema3 CASCADE")
   966  			testhelper.AssertQueryRuns(backupConn,
   967  				"CREATE INDEX foo3_idx1 ON schema2.foo3(i)")
   968  			defer testhelper.AssertQueryRuns(backupConn,
   969  				"DROP INDEX schema2.foo3_idx1")
   970  			testhelper.AssertQueryRuns(backupConn,
   971  				"ANALYZE schema2.foo3")
   972  			output := gpbackup(gpbackupPath, backupHelperPath,
   973  				"--with-stats")
   974  			timestamp := getBackupTimestamp(string(output))
   975  
   976  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   977  				"--redirect-db", "restoredb",
   978  				"--include-table", "schema2.foo3",
   979  				"--redirect-schema", "schema3",
   980  				"--with-stats")
   981  
   982  			schema3TupleCounts := map[string]int{
   983  				"schema3.foo3": 100,
   984  			}
   985  			assertDataRestored(restoreConn, schema3TupleCounts)
   986  			assertPGClassStatsRestored(restoreConn, restoreConn, schema3TupleCounts)
   987  
   988  			actualIndexCount := dbconn.MustSelectString(restoreConn,
   989  				`SELECT count(*) AS string FROM pg_indexes WHERE schemaname='schema3' AND indexname='foo3_idx1';`)
   990  			Expect(actualIndexCount).To(Equal("1"))
   991  
   992  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
   993  				`SELECT count(*) FROM pg_statistic WHERE starelid='schema3.foo3'::regclass::oid;`)
   994  			Expect(actualStatisticCount).To(Equal("1"))
   995  		})
   996  		It("runs gprestore with --redirect-schema to redirect data back to the original database which still contain the original tables", func() {
   997  			skipIfOldBackupVersionBefore("1.17.0")
   998  			testhelper.AssertQueryRuns(backupConn,
   999  				"DROP SCHEMA IF EXISTS schema3 CASCADE; CREATE SCHEMA schema3;")
  1000  			defer testhelper.AssertQueryRuns(backupConn,
  1001  				"DROP SCHEMA schema3 CASCADE")
  1002  			testhelper.AssertQueryRuns(backupConn,
  1003  				"CREATE INDEX foo3_idx1 ON schema2.foo3(i)")
  1004  			defer testhelper.AssertQueryRuns(backupConn,
  1005  				"DROP INDEX schema2.foo3_idx1")
  1006  			testhelper.AssertQueryRuns(backupConn,
  1007  				"ANALYZE schema2.foo3")
  1008  			output := gpbackup(gpbackupPath, backupHelperPath,
  1009  				"--with-stats")
  1010  			timestamp := getBackupTimestamp(string(output))
  1011  
  1012  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1013  				"--include-table", "schema2.foo3",
  1014  				"--redirect-schema", "schema3",
  1015  				"--with-stats")
  1016  
  1017  			schema3TupleCounts := map[string]int{
  1018  				"schema3.foo3": 100,
  1019  			}
  1020  			assertDataRestored(backupConn, schema3TupleCounts)
  1021  			assertPGClassStatsRestored(backupConn, backupConn, schema3TupleCounts)
  1022  
  1023  			actualIndexCount := dbconn.MustSelectString(backupConn,
  1024  				`SELECT count(*) AS string FROM pg_indexes WHERE schemaname='schema3' AND indexname='foo3_idx1';`)
  1025  			Expect(actualIndexCount).To(Equal("1"))
  1026  
  1027  			actualStatisticCount := dbconn.MustSelectString(backupConn,
  1028  				`SELECT count(*) FROM pg_statistic WHERE starelid='schema3.foo3'::regclass::oid;`)
  1029  			Expect(actualStatisticCount).To(Equal("1"))
  1030  		})
  1031  		It("runs gprestore with --redirect-schema and multiple included schemas", func() {
  1032  			skipIfOldBackupVersionBefore("1.17.0")
  1033  			testhelper.AssertQueryRuns(restoreConn,
  1034  				"DROP SCHEMA IF EXISTS schema3 CASCADE; CREATE SCHEMA schema3;")
  1035  			defer testhelper.AssertQueryRuns(restoreConn,
  1036  				"DROP SCHEMA schema3 CASCADE")
  1037  			testhelper.AssertQueryRuns(backupConn,
  1038  				"CREATE SCHEMA \"FOO\"")
  1039  			defer testhelper.AssertQueryRuns(backupConn,
  1040  				"DROP SCHEMA \"FOO\" CASCADE")
  1041  			testhelper.AssertQueryRuns(backupConn,
  1042  				"CREATE TABLE \"FOO\".bar(i int)")
  1043  
  1044  			tableFile := path.Join(backupDir, "test-table-file.txt")
  1045  			includeFile := iohelper.MustOpenFileForWriting(tableFile)
  1046  			utils.MustPrintln(includeFile,
  1047  				"public.sales\nschema2.foo2\nschema2.ao1")
  1048  			utils.MustPrintln(includeFile,
  1049  				"public.sales\nschema2.foo2\nschema2.ao1\nFOO.bar")
  1050  			output := gpbackup(gpbackupPath, backupHelperPath)
  1051  			timestamp := getBackupTimestamp(string(output))
  1052  
  1053  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1054  				"--include-table-file", tableFile,
  1055  				"--redirect-db", "restoredb",
  1056  				"--redirect-schema", "schema3")
  1057  
  1058  			schema3TupleCounts := map[string]int{
  1059  				"schema3.foo2":  0,
  1060  				"schema3.ao1":   1000,
  1061  				"schema3.sales": 13,
  1062  				"schema3.bar":   0,
  1063  			}
  1064  			assertDataRestored(restoreConn, schema3TupleCounts)
  1065  			assertRelationsCreatedInSchema(restoreConn, "schema2", 0)
  1066  		})
  1067  		It("runs --redirect-schema with --matadata-only", func() {
  1068  			skipIfOldBackupVersionBefore("1.17.0")
  1069  			testhelper.AssertQueryRuns(restoreConn,
  1070  				"DROP SCHEMA IF EXISTS schema_to_redirect CASCADE; CREATE SCHEMA \"schema_to_redirect\";")
  1071  			defer testhelper.AssertQueryRuns(restoreConn,
  1072  				"DROP SCHEMA schema_to_redirect CASCADE")
  1073  			testhelper.AssertQueryRuns(backupConn,
  1074  				"CREATE SCHEMA schema_to_test")
  1075  			defer testhelper.AssertQueryRuns(backupConn,
  1076  				"DROP SCHEMA schema_to_test CASCADE")
  1077  			testhelper.AssertQueryRuns(backupConn,
  1078  				"CREATE TABLE schema_to_test.table_metadata_only AS SELECT generate_series(1,10)")
  1079  			output := gpbackup(gpbackupPath, backupHelperPath, "--metadata-only", "--include-schema", "schema_to_test")
  1080  			timestamp := getBackupTimestamp(string(output))
  1081  
  1082  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1083  				"--redirect-db", "restoredb",
  1084  				"--redirect-schema", "schema_to_redirect",
  1085  				"--include-table", "schema_to_test.table_metadata_only",
  1086  				"--metadata-only")
  1087  			assertRelationsCreatedInSchema(restoreConn, "schema_to_redirect", 1)
  1088  			assertDataRestored(restoreConn, map[string]int{"schema_to_redirect.table_metadata_only": 0})
  1089  		})
  1090  		It("runs --redirect-schema with --include-schema and --include-schema-file", func() {
  1091  			skipIfOldBackupVersionBefore("1.17.0")
  1092  			testhelper.AssertQueryRuns(restoreConn,
  1093  				"DROP SCHEMA IF EXISTS schema3 CASCADE; CREATE SCHEMA schema3;")
  1094  			defer testhelper.AssertQueryRuns(restoreConn,
  1095  				"DROP SCHEMA schema3 CASCADE")
  1096  			testhelper.AssertQueryRuns(backupConn,
  1097  				"CREATE SCHEMA fooschema")
  1098  			defer testhelper.AssertQueryRuns(backupConn,
  1099  				"DROP SCHEMA fooschema CASCADE")
  1100  			testhelper.AssertQueryRuns(backupConn,
  1101  				"CREATE TABLE fooschema.redirected_table(i int)")
  1102  
  1103  			schemaFile := path.Join(backupDir, "test-schema-file.txt")
  1104  			includeSchemaFd := iohelper.MustOpenFileForWriting(schemaFile)
  1105  			utils.MustPrintln(includeSchemaFd, "fooschema")
  1106  
  1107  			output := gpbackup(gpbackupPath, backupHelperPath)
  1108  			timestamp := getBackupTimestamp(string(output))
  1109  
  1110  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1111  				"--include-schema-file", schemaFile,
  1112  				"--include-schema", "schema2",
  1113  				"--redirect-db", "restoredb",
  1114  				"--redirect-schema", "schema3")
  1115  
  1116  			expectedSchema3TupleCounts := map[string]int{
  1117  				"schema3.returns":          6,
  1118  				"schema3.foo2":             0,
  1119  				"schema3.foo3":             100,
  1120  				"schema3.ao1":              1000,
  1121  				"schema3.ao2":              1000,
  1122  				"schema3.redirected_table": 0,
  1123  			}
  1124  			assertDataRestored(restoreConn, expectedSchema3TupleCounts)
  1125  			assertRelationsCreatedInSchema(restoreConn, "public", 0)
  1126  			assertRelationsCreatedInSchema(restoreConn, "schema2", 0)
  1127  			assertRelationsCreatedInSchema(restoreConn, "fooschema", 0)
  1128  		})
  1129  	})
  1130  	Describe("ACLs for extensions", func() {
  1131  		It("runs gpbackup and gprestores any user defined ACLs on extensions", func() {
  1132  			skipIfOldBackupVersionBefore("1.17.0")
  1133  			currentUser := os.Getenv("USER")
  1134  			testhelper.AssertQueryRuns(backupConn, "CREATE ROLE testrole")
  1135  			defer testhelper.AssertQueryRuns(backupConn,
  1136  				"DROP ROLE testrole")
  1137  			testhelper.AssertQueryRuns(backupConn, "CREATE EXTENSION pgcrypto")
  1138  			defer testhelper.AssertQueryRuns(backupConn,
  1139  				"DROP EXTENSION pgcrypto")
  1140  			// Create a grant on a function that belongs to the extension
  1141  			testhelper.AssertQueryRuns(backupConn,
  1142  				"GRANT EXECUTE ON FUNCTION gen_random_bytes(integer) to testrole WITH GRANT OPTION")
  1143  
  1144  			output := gpbackup(gpbackupPath, backupHelperPath,
  1145  				"--metadata-only")
  1146  			timestamp := getBackupTimestamp(string(output))
  1147  
  1148  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1149  				"--redirect-db", "restoredb")
  1150  
  1151  			extensionMetadata := backup.ObjectMetadata{
  1152  				ObjectType: toc.OBJ_FUNCTION, Privileges: []backup.ACL{
  1153  					{Grantee: "", Execute: true},
  1154  					{Grantee: currentUser, Execute: true},
  1155  					{Grantee: "testrole", ExecuteWithGrant: true},
  1156  				}, Owner: currentUser}
  1157  
  1158  			// Check for the corresponding grants in restored database
  1159  			uniqueID := testutils.UniqueIDFromObjectName(restoreConn,
  1160  				"public", "gen_random_bytes", backup.TYPE_FUNCTION)
  1161  			resultMetadataMap := backup.GetMetadataForObjectType(restoreConn, backup.TYPE_FUNCTION)
  1162  
  1163  			Expect(resultMetadataMap).To(Not(BeEmpty()))
  1164  			resultMetadata := resultMetadataMap[uniqueID]
  1165  			match, err := structmatcher.MatchStruct(&extensionMetadata).Match(&resultMetadata)
  1166  			Expect(err).To(Not(HaveOccurred()))
  1167  			Expect(match).To(BeTrue())
  1168  			// Following statement is needed in order to drop testrole
  1169  			testhelper.AssertQueryRuns(restoreConn, "DROP EXTENSION pgcrypto")
  1170  			assertArtifactsCleaned(timestamp)
  1171  		})
  1172  	})
  1173  	Describe("Restore with truncate-table", func() {
  1174  		It("runs gpbackup and gprestore with truncate-table and include-table flags", func() {
  1175  			output := gpbackup(gpbackupPath, backupHelperPath)
  1176  			timestamp := getBackupTimestamp(string(output))
  1177  
  1178  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1179  				"--redirect-db", "restoredb",
  1180  				"--include-table", "public.sales")
  1181  			assertDataRestored(restoreConn, map[string]int{
  1182  				"public.sales": 13})
  1183  
  1184  			testhelper.AssertQueryRuns(restoreConn,
  1185  				"INSERT into sales values(1, '2017-01-01', 109.99)")
  1186  			time.Sleep(1 * time.Second)
  1187  
  1188  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1189  				"--redirect-db", "restoredb",
  1190  				"--include-table", "public.sales",
  1191  				"--truncate-table", "--data-only")
  1192  			assertDataRestored(restoreConn, map[string]int{
  1193  				"public.sales": 13})
  1194  		})
  1195  		It("runs gpbackup and gprestore with truncate-table and include-table-file flags", func() {
  1196  			includeFile := iohelper.MustOpenFileForWriting("/tmp/include-tables.txt")
  1197  			utils.MustPrintln(includeFile, "public.sales")
  1198  			output := gpbackup(gpbackupPath, backupHelperPath,
  1199  				"--backup-dir", backupDir)
  1200  			timestamp := getBackupTimestamp(string(output))
  1201  
  1202  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1203  				"--redirect-db", "restoredb",
  1204  				"--backup-dir", backupDir,
  1205  				"--include-table-file", "/tmp/include-tables.txt")
  1206  			assertDataRestored(restoreConn, map[string]int{
  1207  				"public.sales": 13})
  1208  
  1209  			testhelper.AssertQueryRuns(restoreConn,
  1210  				"INSERT into sales values(1, '2017-01-01', 99.99)")
  1211  			time.Sleep(1 * time.Second)
  1212  
  1213  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1214  				"--redirect-db", "restoredb",
  1215  				"--backup-dir", backupDir,
  1216  				"--include-table-file", "/tmp/include-tables.txt",
  1217  				"--truncate-table", "--data-only")
  1218  			assertDataRestored(restoreConn, map[string]int{
  1219  				"public.sales": 13})
  1220  
  1221  			_ = os.Remove("/tmp/include-tables.txt")
  1222  		})
  1223  		It("runs gpbackup and gprestore with truncate-table flag against a leaf partition", func() {
  1224  			skipIfOldBackupVersionBefore("1.7.2")
  1225  			output := gpbackup(gpbackupPath, backupHelperPath,
  1226  				"--leaf-partition-data")
  1227  			timestamp := getBackupTimestamp(string(output))
  1228  
  1229  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1230  				"--redirect-db", "restoredb",
  1231  				"--include-table", "public.sales_1_prt_jan17")
  1232  
  1233  			testhelper.AssertQueryRuns(restoreConn,
  1234  				"INSERT into public.sales_1_prt_jan17 values(1, '2017-01-01', 99.99)")
  1235  			time.Sleep(1 * time.Second)
  1236  
  1237  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1238  				"--redirect-db", "restoredb",
  1239  				"--include-table", "public.sales_1_prt_jan17",
  1240  				"--truncate-table", "--data-only")
  1241  			assertDataRestored(restoreConn, map[string]int{
  1242  				"public.sales": 1, "public.sales_1_prt_jan17": 1})
  1243  		})
  1244  	})
  1245  	Describe("Restore with --run-analyze", func() {
  1246  		It("runs gprestore without --run-analyze", func() {
  1247  			output := gpbackup(gpbackupPath, backupHelperPath,
  1248  				"--include-table", "public.sales")
  1249  			timestamp := getBackupTimestamp(string(output))
  1250  
  1251  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1252  				"--redirect-db", "restoredb")
  1253  
  1254  			// Since --run-analyze was not used, there should be no statistics
  1255  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
  1256  				`SELECT count(*) FROM pg_statistic WHERE starelid='public.sales'::regclass::oid`)
  1257  			Expect(actualStatisticCount).To(Equal("0"))
  1258  		})
  1259  		It("runs gprestore with --run-analyze", func() {
  1260  			output := gpbackup(gpbackupPath, backupHelperPath,
  1261  				"--include-table", "public.sales")
  1262  			timestamp := getBackupTimestamp(string(output))
  1263  
  1264  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1265  				"--redirect-db", "restoredb",
  1266  				"--run-analyze")
  1267  
  1268  			// Since --run-analyze was used, there should be stats
  1269  			// for all 3 columns of the sales partition table
  1270  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
  1271  				`SELECT count(*) FROM pg_statistic WHERE starelid='public.sales'::regclass::oid`)
  1272  			Expect(actualStatisticCount).To(Equal("3"))
  1273  		})
  1274  		It("runs gprestore with --run-analyze and --redirect-schema", func() {
  1275  			skipIfOldBackupVersionBefore("1.17.0")
  1276  			testhelper.AssertQueryRuns(restoreConn, "CREATE SCHEMA fooschema")
  1277  			defer testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA fooschema CASCADE")
  1278  			output := gpbackup(gpbackupPath, backupHelperPath,
  1279  				"--include-table", "public.sales")
  1280  			timestamp := getBackupTimestamp(string(output))
  1281  
  1282  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1283  				"--redirect-db", "restoredb",
  1284  				"--include-table", "public.sales",
  1285  				"--redirect-schema", "fooschema",
  1286  				"--run-analyze")
  1287  
  1288  			// Since --run-analyze was used, there should be stats
  1289  			// for all 3 columns of the sales partition table.
  1290  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
  1291  				`SELECT count(*) FROM pg_statistic WHERE starelid='fooschema.sales'::regclass::oid`)
  1292  			Expect(actualStatisticCount).To(Equal("3"))
  1293  		})
  1294  		It("runs gpbackup with --leaf-partition-data and gprestore with --run-analyze", func() {
  1295  			output := gpbackup(gpbackupPath, backupHelperPath,
  1296  				"--include-table", "public.sales", "--leaf-partition-data")
  1297  			timestamp := getBackupTimestamp(string(output))
  1298  
  1299  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1300  				"--redirect-db", "restoredb",
  1301  				"--run-analyze")
  1302  
  1303  			// Since --run-analyze was used, there should be stats
  1304  			// for all 3 columns of the sales partition table. The
  1305  			// leaf partition stats should merge up to the root
  1306  			// partition.
  1307  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
  1308  				`SELECT count(*) FROM pg_statistic WHERE starelid='public.sales'::regclass::oid`)
  1309  			Expect(actualStatisticCount).To(Equal("3"))
  1310  		})
  1311  	})
  1312  	Describe("Restore with --report-dir", func() {
  1313  		It("runs gprestore without --report-dir", func() {
  1314  			output := gpbackup(gpbackupPath, backupHelperPath,
  1315  				"--include-table", "public.sales")
  1316  			timestamp := getBackupTimestamp(string(output))
  1317  
  1318  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1319  				"--redirect-db", "restoredb")
  1320  
  1321  			// Since --report-dir and --backup-dir were not used, restore report should be in default dir
  1322  			checkRestoreMetadataFile(backupCluster.GetDirForContent(-1), timestamp, "report", true)
  1323  		})
  1324  		It("runs gprestore without --report-dir, but with --backup-dir", func() {
  1325  			output := gpbackup(gpbackupPath, backupHelperPath,
  1326  				"--backup-dir", backupDir,
  1327  				"--include-table", "public.sales")
  1328  			timestamp := getBackupTimestamp(string(output))
  1329  
  1330  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1331  				"--backup-dir", backupDir,
  1332  				"--redirect-db", "restoredb")
  1333  
  1334  			// Since --backup-dir was used, restore report should be in backup dir
  1335  			checkRestoreMetadataFile(backupDir, timestamp, "report", true)
  1336  		})
  1337  		It("runs gprestore with --report-dir and same --backup-dir", func() {
  1338  			output := gpbackup(gpbackupPath, backupHelperPath,
  1339  				"--backup-dir", backupDir,
  1340  				"--include-table", "public.sales")
  1341  			timestamp := getBackupTimestamp(string(output))
  1342  
  1343  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1344  				"--backup-dir", backupDir,
  1345  				"--report-dir", backupDir,
  1346  				"--redirect-db", "restoredb")
  1347  
  1348  			// Since --report-dir and --backup-dir are the same, restore report should be in backup dir
  1349  			checkRestoreMetadataFile(backupDir, timestamp, "report", true)
  1350  		})
  1351  		It("runs gprestore with --report-dir and same --backup-dir, and --single-backup-dir", func() {
  1352  			if useOldBackupVersion {
  1353  				Skip("This test is not needed for old backup versions")
  1354  			}
  1355  			output := gpbackup(gpbackupPath, backupHelperPath,
  1356  				"--backup-dir", backupDir,
  1357  				"--include-table", "public.sales", "--single-backup-dir")
  1358  			timestamp := getBackupTimestamp(string(output))
  1359  
  1360  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1361  				"--backup-dir", backupDir,
  1362  				"--report-dir", backupDir,
  1363  				"--redirect-db", "restoredb")
  1364  
  1365  			// Since --report-dir and --backup-dir are the same, restore report should be in backup dir
  1366  			checkRestoreMetadataFile(backupDir, timestamp, "report", false)
  1367  		})
  1368  		It("runs gprestore with --report-dir and different --backup-dir", func() {
  1369  			reportDir := path.Join(backupDir, "restore")
  1370  			output := gpbackup(gpbackupPath, backupHelperPath,
  1371  				"--backup-dir", backupDir,
  1372  				"--include-table", "public.sales")
  1373  			timestamp := getBackupTimestamp(string(output))
  1374  
  1375  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1376  				"--backup-dir", backupDir,
  1377  				"--report-dir", reportDir,
  1378  				"--redirect-db", "restoredb")
  1379  
  1380  			// Since --report-dir differs from --backup-dir, restore report should be in report dir
  1381  			checkRestoreMetadataFile(reportDir, timestamp, "report", true)
  1382  		})
  1383  		It("runs gprestore with --report-dir and different --backup-dir, with --single-backup-dir", func() {
  1384  			if useOldBackupVersion {
  1385  				Skip("This test is not needed for old backup versions")
  1386  			}
  1387  			reportDir := path.Join(backupDir, "restore")
  1388  			output := gpbackup(gpbackupPath, backupHelperPath,
  1389  				"--backup-dir", backupDir,
  1390  				"--include-table", "public.sales", "--single-backup-dir")
  1391  			timestamp := getBackupTimestamp(string(output))
  1392  
  1393  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1394  				"--backup-dir", backupDir,
  1395  				"--report-dir", reportDir,
  1396  				"--redirect-db", "restoredb")
  1397  
  1398  			// Since --report-dir differs from --backup-dir, restore report should be in report dir
  1399  			checkRestoreMetadataFile(reportDir, timestamp, "report", false)
  1400  		})
  1401  		It("runs gprestore with --report-dir and check error_tables* files are present", func() {
  1402  			if segmentCount != 3 {
  1403  				Skip("Restoring from a tarred backup currently requires a 3-segment cluster to test.")
  1404  			}
  1405  			extractDirectory := extractSavedTarFile(backupDir, "corrupt-db")
  1406  			reportDir := path.Join(backupDir, "restore")
  1407  
  1408  			// Restore command with data error
  1409  			// Metadata errors due to invalid alter ownership
  1410  			gprestoreCmd := exec.Command(gprestorePath,
  1411  				"--timestamp", "20190809230424",
  1412  				"--redirect-db", "restoredb",
  1413  				"--backup-dir", extractDirectory,
  1414  				"--report-dir", reportDir,
  1415  				"--on-error-continue",
  1416  				"--verbose")
  1417  			_, _ = gprestoreCmd.CombinedOutput()
  1418  
  1419  			// All report files should be placed in the same dir
  1420  			checkRestoreMetadataFile(reportDir, "20190809230424", "report", true)
  1421  			checkRestoreMetadataFile(reportDir, "20190809230424", "error_tables_metadata", true)
  1422  			checkRestoreMetadataFile(reportDir, "20190809230424", "error_tables_data", true)
  1423  		})
  1424  	})
  1425  	Describe("Flag combinations", func() {
  1426  		It("runs gpbackup and gprestore without redirecting restore to another db", func() {
  1427  			err := exec.Command("createdb", "recreateme").Run()
  1428  			if err != nil {
  1429  				Fail(fmt.Sprintf("%v", err))
  1430  			}
  1431  
  1432  			// Specifying the recreateme database will override the default DB, testdb
  1433  			output := gpbackup(gpbackupPath, backupHelperPath,
  1434  				"--dbname", "recreateme")
  1435  			timestamp := getBackupTimestamp(string(output))
  1436  
  1437  			err = exec.Command("dropdb", "recreateme").Run()
  1438  			if err != nil {
  1439  				Fail(fmt.Sprintf("%v", err))
  1440  			}
  1441  
  1442  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1443  				"--create-db")
  1444  			recreatemeConn := testutils.SetupTestDbConn("recreateme")
  1445  			recreatemeConn.Close()
  1446  
  1447  			err = exec.Command("dropdb", "recreateme").Run()
  1448  			if err != nil {
  1449  				Fail(fmt.Sprintf("%v", err))
  1450  			}
  1451  		})
  1452  		It("runs basic gpbackup and gprestore with metadata and data-only flags", func() {
  1453  			output := gpbackup(gpbackupPath, backupHelperPath,
  1454  				"--metadata-only")
  1455  			timestamp := getBackupTimestamp(string(output))
  1456  
  1457  			output2 := gpbackup(gpbackupPath, backupHelperPath,
  1458  				"--data-only")
  1459  			timestamp2 := getBackupTimestamp(string(output2))
  1460  
  1461  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1462  				"--redirect-db", "restoredb")
  1463  			assertDataRestored(restoreConn, map[string]int{
  1464  				"public.foo": 0, "schema2.foo3": 0})
  1465  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1466  			gprestore(gprestorePath, restoreHelperPath, timestamp2,
  1467  				"--redirect-db", "restoredb")
  1468  
  1469  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1470  			assertDataRestored(restoreConn, schema2TupleCounts)
  1471  		})
  1472  		It("runs gpbackup and gprestore with metadata-only backup flag", func() {
  1473  			output := gpbackup(gpbackupPath, backupHelperPath,
  1474  				"--metadata-only")
  1475  			timestamp := getBackupTimestamp(string(output))
  1476  
  1477  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1478  				"--redirect-db", "restoredb")
  1479  
  1480  			assertDataRestored(restoreConn, map[string]int{
  1481  				"public.foo": 0, "schema2.foo3": 0})
  1482  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1483  		})
  1484  		It("runs gpbackup and gprestore with data-only backup flag", func() {
  1485  			testutils.ExecuteSQLFile(restoreConn, "resources/test_tables_ddl.sql")
  1486  
  1487  			output := gpbackup(gpbackupPath, backupHelperPath,
  1488  				"--data-only")
  1489  			timestamp := getBackupTimestamp(string(output))
  1490  
  1491  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1492  				"--redirect-db", "restoredb")
  1493  
  1494  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1495  			assertDataRestored(restoreConn, schema2TupleCounts)
  1496  		})
  1497  		It("runs gpbackup and gprestore with the data-only restore flag", func() {
  1498  			testutils.ExecuteSQLFile(restoreConn, "resources/test_tables_ddl.sql")
  1499  			testhelper.AssertQueryRuns(backupConn, "SELECT pg_catalog.setval('public.myseq2', 8888, false)")
  1500  			defer testhelper.AssertQueryRuns(backupConn, "SELECT pg_catalog.setval('public.myseq2', 100, false)")
  1501  
  1502  			outputBkp := gpbackup(gpbackupPath, backupHelperPath)
  1503  			timestamp := getBackupTimestamp(string(outputBkp))
  1504  
  1505  			outputRes := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1506  				"--redirect-db", "restoredb",
  1507  				"--data-only")
  1508  
  1509  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1510  			assertDataRestored(restoreConn, schema2TupleCounts)
  1511  
  1512  			// Assert that sequence values have been properly
  1513  			// updated as part of special sequence handling during
  1514  			// gprestore --data-only calls
  1515  			restoreSequenceValue := dbconn.MustSelectString(restoreConn,
  1516  				`SELECT last_value FROM public.myseq2`)
  1517  			Expect(restoreSequenceValue).To(Equal("8888"))
  1518  			Expect(string(outputRes)).To(ContainSubstring("Restoring sequence values"))
  1519  		})
  1520  		It("runs gpbackup and gprestore with the metadata-only restore flag", func() {
  1521  			output := gpbackup(gpbackupPath, backupHelperPath)
  1522  			timestamp := getBackupTimestamp(string(output))
  1523  
  1524  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1525  				"--redirect-db", "restoredb",
  1526  				"--metadata-only")
  1527  
  1528  			assertDataRestored(restoreConn, map[string]int{
  1529  				"public.foo": 0, "schema2.foo3": 0})
  1530  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1531  		})
  1532  		It("runs gpbackup and gprestore with leaf-partition-data and backupDir flags", func() {
  1533  			outputBkp := gpbackup(gpbackupPath, backupHelperPath,
  1534  				"--leaf-partition-data",
  1535  				"--backup-dir", backupDir)
  1536  			timestamp := getBackupTimestamp(string(outputBkp))
  1537  
  1538  			outputRes := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1539  				"--redirect-db", "restoredb",
  1540  				"--backup-dir", backupDir)
  1541  			Expect(string(outputRes)).To(ContainSubstring("table 31 of 31"))
  1542  
  1543  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1544  			assertDataRestored(restoreConn, schema2TupleCounts)
  1545  		})
  1546  		It("runs gpbackup and gprestore with no-compression flag", func() {
  1547  			outputBkp := gpbackup(gpbackupPath, backupHelperPath,
  1548  				"--no-compression",
  1549  				"--backup-dir", backupDir)
  1550  			timestamp := getBackupTimestamp(string(outputBkp))
  1551  
  1552  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1553  				"--redirect-db", "restoredb",
  1554  				"--backup-dir", backupDir)
  1555  
  1556  			configPath := path.Join(backupDir, "backups/*", timestamp, "*config.yaml")
  1557  			if _, err := os.Stat(path.Join(backupDir, "backups")); err != nil && os.IsNotExist(err) {
  1558  				configPath = path.Join(backupDir, "*-1/backups/*", timestamp, "*config.yaml")
  1559  			}
  1560  			configFile, err := path.Glob(configPath)
  1561  			Expect(err).ToNot(HaveOccurred())
  1562  			Expect(configFile).To(HaveLen(1))
  1563  
  1564  			contents, err := ioutil.ReadFile(configFile[0])
  1565  			Expect(err).ToNot(HaveOccurred())
  1566  
  1567  			Expect(string(contents)).To(ContainSubstring("compressed: false"))
  1568  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1569  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1570  			assertDataRestored(restoreConn, schema2TupleCounts)
  1571  		})
  1572  		It("runs gpbackup and gprestore with with-stats flag and single-backup-dir", func() {
  1573  			// gpbackup before version 1.18.0 does not dump pg_class statistics correctly
  1574  			skipIfOldBackupVersionBefore("1.18.0")
  1575  
  1576  			outputBkp := gpbackup(gpbackupPath, backupHelperPath,
  1577  				"--with-stats",
  1578  				"--backup-dir", backupDir, "--single-backup-dir")
  1579  			timestamp := getBackupTimestamp(string(outputBkp))
  1580  
  1581  			files, err := path.Glob(path.Join(backupDir, "backups/*",
  1582  				timestamp, "*statistics.sql"))
  1583  			Expect(err).ToNot(HaveOccurred())
  1584  			Expect(files).To(HaveLen(1))
  1585  
  1586  			outputRes := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1587  				"--redirect-db", "restoredb",
  1588  				"--with-stats",
  1589  				"--backup-dir", backupDir)
  1590  
  1591  			Expect(string(outputRes)).To(ContainSubstring("Query planner statistics restore complete"))
  1592  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1593  			assertDataRestored(restoreConn, schema2TupleCounts)
  1594  			assertPGClassStatsRestored(backupConn, restoreConn, publicSchemaTupleCounts)
  1595  			assertPGClassStatsRestored(backupConn, restoreConn, schema2TupleCounts)
  1596  
  1597  			statsQuery := fmt.Sprintf(`SELECT count(*) AS string FROM pg_statistic st left join pg_class cl on st.starelid = cl.oid left join pg_namespace nm on cl.relnamespace = nm.oid where %s;`, backup.SchemaFilterClause("nm"))
  1598  			backupStatisticCount := dbconn.MustSelectString(backupConn, statsQuery)
  1599  			restoredStatisticsCount := dbconn.MustSelectString(restoreConn, statsQuery)
  1600  
  1601  			Expect(backupStatisticCount).To(Equal(restoredStatisticsCount))
  1602  
  1603  			restoredTablesAnalyzed := dbconn.MustSelectString(restoreConn,
  1604  				`SELECT count(*) FROM pg_stat_last_operation WHERE objid IN ('public.foo'::regclass::oid, 'public.holds'::regclass::oid, 'public.sales'::regclass::oid, 'schema2.returns'::regclass::oid, 'schema2.foo2'::regclass::oid, 'schema2.foo3'::regclass::oid, 'schema2.ao1'::regclass::oid, 'schema2.ao2'::regclass::oid) AND staactionname='ANALYZE';`)
  1605  			Expect(restoredTablesAnalyzed).To(Equal("0"))
  1606  		})
  1607  		It("restores statistics only for tables specified in --include-table flag when runs gprestore with with-stats flag and single-backup-dir", func() {
  1608  			if useOldBackupVersion {
  1609  				Skip("This test is not needed for old backup versions")
  1610  			}
  1611  
  1612  			testhelper.AssertQueryRuns(backupConn,
  1613  				"CREATE TABLE public.table_to_include_with_stats(i int)")
  1614  			testhelper.AssertQueryRuns(backupConn,
  1615  				"INSERT INTO public.table_to_include_with_stats SELECT generate_series(0,9);")
  1616  
  1617  			defer testhelper.AssertQueryRuns(backupConn,
  1618  				"DROP TABLE public.table_to_include_with_stats")
  1619  			output := gpbackup(gpbackupPath, backupHelperPath,
  1620  				"--with-stats",
  1621  				"--backup-dir", backupDir,
  1622  				"--single-backup-dir")
  1623  			timestamp := getBackupTimestamp(string(output))
  1624  
  1625  			statFiles, err := path.Glob(path.Join(backupDir, "backups/*",
  1626  				timestamp, "*statistics.sql"))
  1627  			Expect(err).ToNot(HaveOccurred())
  1628  			Expect(statFiles).To(HaveLen(1))
  1629  
  1630  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1631  				"--redirect-db", "restoredb",
  1632  				"--with-stats",
  1633  				"--backup-dir", backupDir,
  1634  				"--include-table", "public.table_to_include_with_stats")
  1635  
  1636  			includeTableTupleCounts := map[string]int{
  1637  				"public.table_to_include_with_stats": 10,
  1638  			}
  1639  			assertDataRestored(backupConn, includeTableTupleCounts)
  1640  			assertPGClassStatsRestored(backupConn, restoreConn, includeTableTupleCounts)
  1641  
  1642  			rawCount := dbconn.MustSelectString(restoreConn,
  1643  				"SELECT count(*) FROM pg_statistic WHERE starelid = 'public.table_to_include_with_stats'::regclass::oid;")
  1644  			Expect(rawCount).To(Equal(strconv.Itoa(1)))
  1645  
  1646  			restoreTableCount := dbconn.MustSelectString(restoreConn,
  1647  				"SELECT count(*) FROM pg_class WHERE oid >= 16384 AND relnamespace in (SELECT oid from pg_namespace WHERE nspname in ('public', 'schema2'));")
  1648  			Expect(restoreTableCount).To(Equal(strconv.Itoa(1)))
  1649  		})
  1650  		It("runs gpbackup and gprestore with jobs flag", func() {
  1651  			skipIfOldBackupVersionBefore("1.3.0")
  1652  			outputBkp := gpbackup(gpbackupPath, backupHelperPath,
  1653  				"--backup-dir", backupDir,
  1654  				"--jobs", "4")
  1655  			timestamp := getBackupTimestamp(string(outputBkp))
  1656  
  1657  			outputRes := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1658  				"--redirect-db", "restoredb",
  1659  				"--backup-dir", backupDir,
  1660  				"--jobs", "4",
  1661  				"--verbose")
  1662  
  1663  			expectedString := fmt.Sprintf("table %d of %d", TOTAL_CREATE_STATEMENTS, TOTAL_CREATE_STATEMENTS)
  1664  			Expect(string(outputRes)).To(ContainSubstring(expectedString))
  1665  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1666  			assertDataRestored(restoreConn, schema2TupleCounts)
  1667  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1668  		})
  1669  		It("runs gpbackup with --version flag", func() {
  1670  			if useOldBackupVersion {
  1671  				Skip("This test is not needed for old backup versions")
  1672  			}
  1673  			command := exec.Command(gpbackupPath, "--version")
  1674  			output := mustRunCommand(command)
  1675  			Expect(string(output)).To(MatchRegexp(`gpbackup version \w+`))
  1676  		})
  1677  		It("runs gprestore with --version flag", func() {
  1678  			command := exec.Command(gprestorePath, "--version")
  1679  			output := mustRunCommand(command)
  1680  			Expect(string(output)).To(MatchRegexp(`gprestore version \w+`))
  1681  		})
  1682  		It("runs gprestore with --include-schema and --exclude-table flag", func() {
  1683  			output := gpbackup(gpbackupPath, backupHelperPath,
  1684  				"--metadata-only")
  1685  			timestamp := getBackupTimestamp(string(output))
  1686  
  1687  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1688  				"--redirect-db", "restoredb",
  1689  				"--include-schema", "schema2",
  1690  				"--exclude-table", "schema2.returns",
  1691  				"--metadata-only")
  1692  			assertRelationsCreated(restoreConn, 4)
  1693  		})
  1694  		It("runs gprestore with jobs flag and postdata has metadata", func() {
  1695  			if useOldBackupVersion {
  1696  				Skip("This test is not needed for old backup versions")
  1697  			}
  1698  
  1699  			if backupConn.Version.Before("6") {
  1700  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLESPACE test_tablespace FILESPACE test_dir")
  1701  			} else {
  1702  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLESPACE test_tablespace LOCATION '/tmp/test_dir';")
  1703  			}
  1704  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLESPACE test_tablespace;")
  1705  
  1706  			// Store everything in this test schema for easy test cleanup.
  1707  			testhelper.AssertQueryRuns(backupConn, "CREATE SCHEMA postdata_metadata;")
  1708  			defer testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA postdata_metadata CASCADE;")
  1709  			defer testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA postdata_metadata CASCADE;")
  1710  
  1711  			// Create a table and indexes. Currently for indexes, there are 4 possible pieces
  1712  			// of metadata: TABLESPACE, CLUSTER, REPLICA IDENTITY, and COMMENT.
  1713  			testhelper.AssertQueryRuns(backupConn, "CREATE TABLE postdata_metadata.foobar (a int NOT NULL);")
  1714  			testhelper.AssertQueryRuns(backupConn, "CREATE INDEX fooidx1 ON postdata_metadata.foobar USING btree(a) TABLESPACE test_tablespace;")
  1715  			testhelper.AssertQueryRuns(backupConn, "CREATE INDEX fooidx2 ON postdata_metadata.foobar USING btree(a) TABLESPACE test_tablespace;")
  1716  			testhelper.AssertQueryRuns(backupConn, "CREATE UNIQUE INDEX fooidx3 ON postdata_metadata.foobar USING btree(a) TABLESPACE test_tablespace;")
  1717  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON INDEX postdata_metadata.fooidx1 IS 'hello';")
  1718  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON INDEX postdata_metadata.fooidx2 IS 'hello';")
  1719  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON INDEX postdata_metadata.fooidx3 IS 'hello';")
  1720  			testhelper.AssertQueryRuns(backupConn, "ALTER TABLE postdata_metadata.foobar CLUSTER ON fooidx3;")
  1721  			if backupConn.Version.AtLeast("6") {
  1722  				testhelper.AssertQueryRuns(backupConn, "ALTER TABLE postdata_metadata.foobar REPLICA IDENTITY USING INDEX fooidx3")
  1723  			}
  1724  
  1725  			// Create a rule. Currently for rules, the only metadata is COMMENT.
  1726  			testhelper.AssertQueryRuns(backupConn, "CREATE RULE postdata_rule AS ON UPDATE TO postdata_metadata.foobar DO SELECT * FROM postdata_metadata.foobar;")
  1727  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON RULE postdata_rule ON postdata_metadata.foobar IS 'hello';")
  1728  
  1729  			if backupConn.Version.Before("7") {
  1730  				// TODO: Remove this once support is added
  1731  				// Triggers on statements not yet supported in GPDB7, per src/backend/parser/gram.y:39460,39488
  1732  
  1733  				// Create a trigger. Currently for triggers, the only metadata is COMMENT.
  1734  				testhelper.AssertQueryRuns(backupConn, `CREATE TRIGGER postdata_trigger AFTER INSERT OR DELETE OR UPDATE ON postdata_metadata.foobar FOR EACH STATEMENT EXECUTE PROCEDURE pg_catalog."RI_FKey_check_ins"();`)
  1735  				testhelper.AssertQueryRuns(backupConn, "COMMENT ON TRIGGER postdata_trigger ON postdata_metadata.foobar IS 'hello';")
  1736  			}
  1737  
  1738  			// Create an event trigger. Currently for event triggers, there are 2 possible
  1739  			// pieces of metadata: ENABLE and COMMENT.
  1740  			if backupConn.Version.AtLeast("6") {
  1741  				testhelper.AssertQueryRuns(backupConn, "CREATE OR REPLACE FUNCTION postdata_metadata.postdata_eventtrigger_func() RETURNS event_trigger AS $$ BEGIN END $$ LANGUAGE plpgsql;")
  1742  				testhelper.AssertQueryRuns(backupConn, "CREATE EVENT TRIGGER postdata_eventtrigger ON sql_drop EXECUTE PROCEDURE postdata_metadata.postdata_eventtrigger_func();")
  1743  				testhelper.AssertQueryRuns(backupConn, "ALTER EVENT TRIGGER postdata_eventtrigger DISABLE;")
  1744  				testhelper.AssertQueryRuns(backupConn, "COMMENT ON EVENT TRIGGER postdata_eventtrigger IS 'hello'")
  1745  			}
  1746  
  1747  			outputBkp := gpbackup(gpbackupPath, backupHelperPath,
  1748  				"--metadata-only")
  1749  			timestamp := getBackupTimestamp(string(outputBkp))
  1750  
  1751  			outputRes := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1752  				"--redirect-db", "restoredb", "--jobs", "8", "--verbose")
  1753  
  1754  			// The gprestore parallel postdata restore should have succeeded without a CRITICAL error.
  1755  			stdout := string(outputRes)
  1756  			Expect(stdout).To(Not(ContainSubstring("CRITICAL")))
  1757  			Expect(stdout).To(Not(ContainSubstring("Error encountered when executing statement")))
  1758  		})
  1759  		Describe("Edge case tests", func() {
  1760  			It(`successfully backs up precise real data types`, func() {
  1761  				// Versions before 1.13.0 do not set the extra_float_digits GUC
  1762  				skipIfOldBackupVersionBefore("1.13.0")
  1763  
  1764  				tableName := "public.test_real_precision"
  1765  				tableNameCopy := "public.test_real_precision_copy"
  1766  				testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`CREATE TABLE %s (val real)`, tableName))
  1767  				defer testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`DROP TABLE %s`, tableName))
  1768  				testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`INSERT INTO %s VALUES (0.100001216)`, tableName))
  1769  				testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`CREATE TABLE %s AS SELECT * FROM %s`, tableNameCopy, tableName))
  1770  				defer testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`DROP TABLE %s`, tableNameCopy))
  1771  
  1772  				// We use --jobs flag to make sure all parallel connections have the GUC set properly
  1773  				outputBkp := gpbackup(gpbackupPath, backupHelperPath,
  1774  					"--backup-dir", backupDir,
  1775  					"--dbname", "testdb", "--jobs", "2",
  1776  					"--include-table", fmt.Sprintf("%s", tableName),
  1777  					"--include-table", fmt.Sprintf("%s", tableNameCopy))
  1778  				timestamp := getBackupTimestamp(string(outputBkp))
  1779  
  1780  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1781  					"--redirect-db", "restoredb",
  1782  					"--backup-dir", backupDir)
  1783  				tableCount := dbconn.MustSelectString(restoreConn, fmt.Sprintf("SELECT count(*) FROM %s WHERE val = 0.100001216::real", tableName))
  1784  				Expect(tableCount).To(Equal(strconv.Itoa(1)))
  1785  				tableCopyCount := dbconn.MustSelectString(restoreConn, fmt.Sprintf("SELECT count(*) FROM %s WHERE val = 0.100001216::real", tableNameCopy))
  1786  				Expect(tableCopyCount).To(Equal(strconv.Itoa(1)))
  1787  			})
  1788  			It("does not retrieve trigger constraints  with the rest of the constraints", func() {
  1789  				if backupConn.Version.Is("7") {
  1790  					// TODO: Remove this once support is added
  1791  					Skip("Triggers on statements not yet supported in GPDB7, per src/backend/parser/gram.y:39460,39488")
  1792  				}
  1793  				testutils.SkipIfBefore6(backupConn)
  1794  				testhelper.AssertQueryRuns(backupConn,
  1795  					"CREATE TABLE table_multiple_constraints (a int)")
  1796  				defer testhelper.AssertQueryRuns(backupConn,
  1797  					"DROP TABLE IF EXISTS table_multiple_constraints CASCADE;")
  1798  
  1799  				// Add a trigger constraint
  1800  				testhelper.AssertQueryRuns(backupConn, `CREATE FUNCTION public.no_op_trig_fn() RETURNS trigger AS
  1801  $$begin RETURN NULL; end$$
  1802  LANGUAGE plpgsql NO SQL;`)
  1803  				defer testhelper.AssertQueryRuns(backupConn, `DROP FUNCTION IF EXISTS public.no_op_trig_fn() CASCADE`)
  1804  				testhelper.AssertQueryRuns(backupConn, "CREATE TRIGGER  test_trigger AFTER INSERT  ON public.table_multiple_constraints EXECUTE PROCEDURE public.no_op_trig_fn();")
  1805  
  1806  				// Add a non-trigger constraint
  1807  				testhelper.AssertQueryRuns(backupConn,
  1808  					"ALTER TABLE public.table_multiple_constraints ADD CONSTRAINT alter_table_with_primary_key_pkey PRIMARY KEY (a);")
  1809  
  1810  				// retrieve constraints, assert that only one is retrieved
  1811  				constraintsRetrieved := backup.GetConstraints(backupConn)
  1812  				Expect(len(constraintsRetrieved)).To(Equal(1))
  1813  
  1814  				// assert that the single retrieved constraint is the non-trigger constraint
  1815  				constraintRetrieved := constraintsRetrieved[0]
  1816  				Expect(constraintRetrieved.ConType).To(Equal("p"))
  1817  			})
  1818  			It("correctly distinguishes between domain and non-domain constraints", func() {
  1819  				testutils.SkipIfBefore6(backupConn)
  1820  				testhelper.AssertQueryRuns(backupConn,
  1821  					"CREATE TABLE table_multiple_constraints (a int)")
  1822  				defer testhelper.AssertQueryRuns(backupConn,
  1823  					"DROP TABLE IF EXISTS table_multiple_constraints CASCADE;")
  1824  
  1825  				// Add a domain with a constraint
  1826  				testhelper.AssertQueryRuns(backupConn, "CREATE DOMAIN public.const_domain1 AS text CONSTRAINT cons_check1 CHECK (char_length(VALUE) = 5);")
  1827  				defer testhelper.AssertQueryRuns(backupConn, `DROP DOMAIN IF EXISTS public.const_domain1;`)
  1828  
  1829  				// Add a non-trigger constraint
  1830  				testhelper.AssertQueryRuns(backupConn,
  1831  					"ALTER TABLE public.table_multiple_constraints ADD CONSTRAINT alter_table_with_primary_key_pkey PRIMARY KEY (a);")
  1832  
  1833  				// retrieve constraints, assert that two are retrieved, assert that the domain constraint is correctly categorized
  1834  				constraintsRetrieved := backup.GetConstraints(backupConn)
  1835  				Expect(len(constraintsRetrieved)).To(Equal(2))
  1836  				for _, constr := range constraintsRetrieved {
  1837  					if constr.Name == "cons_check1" {
  1838  						Expect(constr.IsDomainConstraint).To(Equal(true))
  1839  					} else if constr.Name == "alter_table_with_primary_key_pkey" {
  1840  						Expect(constr.IsDomainConstraint).To(Equal(false))
  1841  					} else {
  1842  						Fail("Unrecognized constraint in end-to-end test database")
  1843  					}
  1844  				}
  1845  			})
  1846  			It("backup and restore all data when NOT VALID option on constraints is specified", func() {
  1847  				testutils.SkipIfBefore6(backupConn)
  1848  				testhelper.AssertQueryRuns(backupConn,
  1849  					"CREATE TABLE legacy_table_violate_constraints (a int)")
  1850  				defer testhelper.AssertQueryRuns(backupConn,
  1851  					"DROP TABLE legacy_table_violate_constraints")
  1852  				testhelper.AssertQueryRuns(backupConn,
  1853  					"INSERT INTO legacy_table_violate_constraints values (0), (1), (2), (3), (4), (5), (6), (7)")
  1854  				testhelper.AssertQueryRuns(backupConn,
  1855  					"ALTER TABLE legacy_table_violate_constraints ADD CONSTRAINT new_constraint_not_valid CHECK (a > 4) NOT VALID")
  1856  				defer testhelper.AssertQueryRuns(backupConn,
  1857  					"ALTER TABLE legacy_table_violate_constraints DROP CONSTRAINT new_constraint_not_valid")
  1858  
  1859  				outputBkp := gpbackup(gpbackupPath, backupHelperPath,
  1860  					"--backup-dir", backupDir)
  1861  				timestamp := getBackupTimestamp(string(outputBkp))
  1862  
  1863  				_ = gprestore(gprestorePath, restoreHelperPath, timestamp,
  1864  					"--redirect-db", "restoredb",
  1865  					"--backup-dir", backupDir)
  1866  
  1867  				legacySchemaTupleCounts := map[string]int{
  1868  					`public."legacy_table_violate_constraints"`: 8,
  1869  				}
  1870  				assertDataRestored(restoreConn, legacySchemaTupleCounts)
  1871  
  1872  				isConstraintHere := dbconn.MustSelectString(restoreConn,
  1873  					"SELECT count(*) FROM pg_constraint WHERE conname='new_constraint_not_valid'")
  1874  				Expect(isConstraintHere).To(Equal(strconv.Itoa(1)))
  1875  
  1876  				_, err := restoreConn.Exec("INSERT INTO legacy_table_violate_constraints VALUES (1)")
  1877  				Expect(err).To(HaveOccurred())
  1878  				assertArtifactsCleaned(timestamp)
  1879  			})
  1880  			It("runs gpbackup and gprestore to backup tables depending on functions", func() {
  1881  				skipIfOldBackupVersionBefore("1.19.0")
  1882  				testhelper.AssertQueryRuns(backupConn, "CREATE FUNCTION func1(val integer) RETURNS integer AS $$ BEGIN RETURN val + 1; END; $$ LANGUAGE PLPGSQL;;")
  1883  				defer testhelper.AssertQueryRuns(backupConn, "DROP FUNCTION func1(val integer);")
  1884  
  1885  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLE test_depends_on_function (id integer, claim_id character varying(20) DEFAULT ('WC-'::text || func1(10)::text)) DISTRIBUTED BY (id);")
  1886  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE test_depends_on_function;")
  1887  				testhelper.AssertQueryRuns(backupConn, "INSERT INTO  test_depends_on_function values (1);")
  1888  				testhelper.AssertQueryRuns(backupConn, "INSERT INTO  test_depends_on_function values (2);")
  1889  
  1890  				output := gpbackup(gpbackupPath, backupHelperPath)
  1891  				timestamp := getBackupTimestamp(string(output))
  1892  
  1893  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1894  					"--redirect-db", "restoredb")
  1895  
  1896  				assertRelationsCreated(restoreConn, TOTAL_RELATIONS+1) // for new table
  1897  				assertDataRestored(restoreConn, schema2TupleCounts)
  1898  				assertDataRestored(restoreConn, map[string]int{
  1899  					"public.foo":                      40000,
  1900  					"public.holds":                    50000,
  1901  					"public.sales":                    13,
  1902  					"public.test_depends_on_function": 2})
  1903  				assertArtifactsCleaned(timestamp)
  1904  			})
  1905  			It("runs gpbackup and gprestore to backup functions depending on tables", func() {
  1906  				skipIfOldBackupVersionBefore("1.19.0")
  1907  
  1908  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLE to_use_for_function (n int);")
  1909  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE to_use_for_function;")
  1910  
  1911  				testhelper.AssertQueryRuns(backupConn, "INSERT INTO  to_use_for_function values (1);")
  1912  				testhelper.AssertQueryRuns(backupConn, "CREATE FUNCTION func1(val integer) RETURNS integer AS $$ BEGIN RETURN val + (SELECT n FROM to_use_for_function); END; $$ LANGUAGE PLPGSQL;;")
  1913  
  1914  				defer testhelper.AssertQueryRuns(backupConn, "DROP FUNCTION func1(val integer);")
  1915  
  1916  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLE test_depends_on_function (id integer, claim_id character varying(20) DEFAULT ('WC-'::text || func1(10)::text)) DISTRIBUTED BY (id);")
  1917  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE test_depends_on_function;")
  1918  				testhelper.AssertQueryRuns(backupConn, "INSERT INTO  test_depends_on_function values (1);")
  1919  
  1920  				output := gpbackup(gpbackupPath, backupHelperPath)
  1921  				timestamp := getBackupTimestamp(string(output))
  1922  
  1923  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1924  					"--redirect-db", "restoredb")
  1925  
  1926  				assertRelationsCreated(restoreConn, TOTAL_RELATIONS+2) // for 2 new tables
  1927  				assertDataRestored(restoreConn, schema2TupleCounts)
  1928  				assertDataRestored(restoreConn, map[string]int{
  1929  					"public.foo":                      40000,
  1930  					"public.holds":                    50000,
  1931  					"public.sales":                    13,
  1932  					"public.to_use_for_function":      1,
  1933  					"public.test_depends_on_function": 1})
  1934  
  1935  				assertArtifactsCleaned(timestamp)
  1936  			})
  1937  			It("Can restore xml with xmloption set to document", func() {
  1938  				testutils.SkipIfBefore6(backupConn)
  1939  				// Set up the XML table that contains XML content
  1940  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLE xml_test AS SELECT xml 'fooxml'")
  1941  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE xml_test")
  1942  
  1943  				// Set up database that has xmloption default to document instead of content
  1944  				testhelper.AssertQueryRuns(backupConn, "CREATE DATABASE document_db")
  1945  				defer testhelper.AssertQueryRuns(backupConn, "DROP DATABASE document_db")
  1946  				testhelper.AssertQueryRuns(backupConn, "ALTER DATABASE document_db SET xmloption TO document")
  1947  
  1948  				output := gpbackup(gpbackupPath, backupHelperPath, "--include-table", "public.xml_test")
  1949  				timestamp := getBackupTimestamp(string(output))
  1950  
  1951  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1952  					"--redirect-db", "document_db")
  1953  			})
  1954  			It("does not hold lock on gp_segment_configuration when backup is in progress", func() {
  1955  				if useOldBackupVersion {
  1956  					Skip("This test is not needed for old backup versions")
  1957  				}
  1958  				// Block on pg_trigger, which gpbackup queries after gp_segment_configuration
  1959  				backupConn.MustExec("BEGIN; LOCK TABLE pg_trigger IN ACCESS EXCLUSIVE MODE")
  1960  
  1961  				args := []string{
  1962  					"--dbname", "testdb",
  1963  					"--backup-dir", backupDir,
  1964  					"--verbose"}
  1965  				cmd := exec.Command(gpbackupPath, args...)
  1966  
  1967  				backupConn.MustExec("COMMIT")
  1968  				anotherConn := testutils.SetupTestDbConn("testdb")
  1969  				defer anotherConn.Close()
  1970  				var lockCount int
  1971  				go func() {
  1972  					gpSegConfigQuery := `SELECT * FROM pg_locks l, pg_class c, pg_namespace n WHERE l.relation = c.oid AND n.oid = c.relnamespace AND c.relname = 'gp_segment_configuration';`
  1973  					_ = anotherConn.Get(&lockCount, gpSegConfigQuery)
  1974  				}()
  1975  
  1976  				Expect(lockCount).To(Equal(0))
  1977  
  1978  				output, _ := cmd.CombinedOutput()
  1979  				stdout := string(output)
  1980  				Expect(stdout).To(ContainSubstring("Backup completed successfully"))
  1981  			})
  1982  			It("properly handles various implicit casts on pg_catalog.text", func() {
  1983  				if useOldBackupVersion {
  1984  					Skip("This test is not needed for old backup versions")
  1985  				}
  1986  
  1987  				testutils.ExecuteSQLFile(backupConn, "resources/implicit_casts.sql")
  1988  
  1989  				args := []string{
  1990  					"--dbname", "testdb",
  1991  					"--backup-dir", backupDir,
  1992  					"--verbose"}
  1993  				cmd := exec.Command(gpbackupPath, args...)
  1994  
  1995  				output, _ := cmd.CombinedOutput()
  1996  				stdout := string(output)
  1997  				Expect(stdout).To(ContainSubstring("Backup completed successfully"))
  1998  			})
  1999  			It("Restores views that depend on a constraint by printing a dummy view", func() {
  2000  				testutils.SkipIfBefore6(backupConn)
  2001  				if useOldBackupVersion {
  2002  					Skip("This test is not needed for old backup versions")
  2003  				}
  2004  				testhelper.AssertQueryRuns(backupConn, `CREATE TABLE view_base_table (key int PRIMARY KEY, data varchar(20))`)
  2005  				testhelper.AssertQueryRuns(backupConn, `CREATE VIEW key_dependent_view AS SELECT key, data COLLATE "C" FROM view_base_table GROUP BY key;`)
  2006  				testhelper.AssertQueryRuns(backupConn, `CREATE VIEW key_dependent_view_no_cols AS SELECT FROM view_base_table GROUP BY key HAVING length(data) > 0`)
  2007  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE view_base_table CASCADE")
  2008  
  2009  				output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2010  				timestamp := getBackupTimestamp(string(output))
  2011  
  2012  				contents := string(getMetdataFileContents(backupDir, timestamp, "metadata.sql"))
  2013  				Expect(contents).To(ContainSubstring("CREATE VIEW public.key_dependent_view AS \nSELECT\n\tNULL::integer AS key,\n\tNULL::character varying(20) COLLATE pg_catalog.\"C\" AS data;"))
  2014  				Expect(contents).To(ContainSubstring("CREATE VIEW public.key_dependent_view_no_cols AS \nSELECT;"))
  2015  				Expect(contents).To(ContainSubstring("ALTER TABLE ONLY public.view_base_table ADD CONSTRAINT view_base_table_pkey PRIMARY KEY (key);"))
  2016  				Expect(contents).To(ContainSubstring("CREATE OR REPLACE VIEW public.key_dependent_view AS  SELECT view_base_table.key,\n    (view_base_table.data COLLATE \"C\") AS data\n   FROM public.view_base_table\n  GROUP BY view_base_table.key;"))
  2017  				Expect(contents).To(ContainSubstring("CREATE OR REPLACE VIEW public.key_dependent_view_no_cols AS  SELECT\n   FROM public.view_base_table\n  GROUP BY view_base_table.key\n HAVING (length((view_base_table.data)::text) > 0);"))
  2018  
  2019  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  2020  					"--redirect-db", "restoredb",
  2021  					"--backup-dir", backupDir)
  2022  			})
  2023  		})
  2024  	})
  2025  	Describe("Properly handles enum type as distribution or partition key", func() {
  2026  		BeforeEach(func() {
  2027  			testutils.SkipIfBefore6(backupConn)
  2028  			if useOldBackupVersion {
  2029  				Skip("This test is not needed for old backup versions")
  2030  			}
  2031  
  2032  			testhelper.AssertQueryRuns(backupConn, `
  2033  			CREATE TYPE colors AS ENUM ('red', 'blue', 'green', 'yellow');
  2034  			CREATE TYPE fruits AS ENUM ('apple', 'banana', 'cherry', 'orange');`)
  2035  		})
  2036  		AfterEach(func() {
  2037  			testhelper.AssertQueryRuns(backupConn, "DROP TYPE colors CASCADE;")
  2038  			testhelper.AssertQueryRuns(backupConn, "DROP TYPE fruits CASCADE;")
  2039  		})
  2040  		It("Restores table data distributed by an enum", func() {
  2041  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE table_with_enum_distkey (key colors) DISTRIBUTED BY (key)`)
  2042  			testhelper.AssertQueryRuns(backupConn, `INSERT INTO table_with_enum_distkey VALUES ('red'), ('blue'), ('green'), ('yellow'),
  2043  			('red'), ('blue'), ('green'), ('yellow'), ('red'), ('blue'), ('green'), ('yellow'), ('red'), ('blue'), ('green'), ('yellow');`)
  2044  
  2045  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE table_with_enum_distkey CASCADE")
  2046  
  2047  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2048  			timestamp := getBackupTimestamp(string(output))
  2049  
  2050  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  2051  				"--redirect-db", "restoredb",
  2052  				"--backup-dir", backupDir)
  2053  			assertDataRestored(restoreConn, map[string]int{
  2054  				"table_with_enum_distkey": 16})
  2055  		})
  2056  		It("Restores table data distributed by multi-key enum", func() {
  2057  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE table_with_multi_enum_distkey (key1 colors, key2 fruits) DISTRIBUTED BY (key1, key2);`)
  2058  			testhelper.AssertQueryRuns(backupConn, `INSERT INTO table_with_multi_enum_distkey (key1, key2) VALUES ('red', 'apple'), ('blue', 'orange'), ('green', 'cherry'), ('yellow', 'banana'), ('red', 'cherry'), ('blue', 'orange'), ('green', 'apple'), ('yellow', 'cherry'), ('red', 'banana'), ('blue', 'apple'), ('green', 'cherry'), ('yellow', 'orange'), ('red', 'apple'), ('blue', 'cherry'), ('green', 'banana'), ('yellow', 'apple');`)
  2059  
  2060  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE table_with_multi_enum_distkey CASCADE")
  2061  
  2062  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2063  			timestamp := getBackupTimestamp(string(output))
  2064  
  2065  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  2066  				"--redirect-db", "restoredb",
  2067  				"--backup-dir", backupDir)
  2068  			assertDataRestored(restoreConn, map[string]int{
  2069  				"table_with_multi_enum_distkey": 16})
  2070  		})
  2071  		It("Restores table data distributed by altered enum type", func() {
  2072  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE table_with_altered_enum_distkey (key colors) DISTRIBUTED BY (key)`)
  2073  			testhelper.AssertQueryRuns(backupConn, `INSERT INTO table_with_altered_enum_distkey VALUES ('red'), ('blue'), ('green'), ('yellow'),
  2074  			('red'), ('blue'), ('green'), ('yellow'), ('red'), ('blue'), ('green'), ('yellow'), ('red'), ('blue'), ('green'), ('yellow');`)
  2075  			testhelper.AssertQueryRuns(backupConn, `ALTER TYPE colors ADD VALUE 'purple';`)
  2076  			testhelper.AssertQueryRuns(backupConn, `INSERT INTO table_with_altered_enum_distkey VALUES ('purple'), ('purple'), ('purple'), ('purple');`)
  2077  
  2078  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE table_with_altered_enum_distkey CASCADE")
  2079  
  2080  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2081  			timestamp := getBackupTimestamp(string(output))
  2082  
  2083  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  2084  				"--redirect-db", "restoredb",
  2085  				"--backup-dir", backupDir)
  2086  			assertDataRestored(restoreConn, map[string]int{
  2087  				"table_with_altered_enum_distkey": 20})
  2088  		})
  2089  		It("Restores table data partitioned by enum", func() {
  2090  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE table_with_enum_partkey (a int, b colors) DISTRIBUTED BY (a) PARTITION BY LIST (b) 
  2091  																							(PARTITION red VALUES ('red'),
  2092  																							PARTITION blue VALUES ('blue'),
  2093  																							PARTITION green VALUES ('green'),
  2094  																							PARTITION yellow VALUES ('yellow'));`)
  2095  			testhelper.AssertQueryRuns(backupConn, `INSERT INTO table_with_enum_partkey VALUES (1, 'red'), (2, 'blue'), (3, 'green'), (4, 'yellow'),
  2096  			(5, 'red'), (6, 'blue'), (7, 'green'), (8, 'yellow'), (9, 'red'), (10, 'blue'), (11, 'green'), (12, 'yellow'), (13, 'red'), (14, 'blue'), (15, 'green'), (16, 'yellow');`)
  2097  			testhelper.AssertQueryRuns(backupConn, `ALTER TYPE colors ADD VALUE 'purple';`)
  2098  			testhelper.AssertQueryRuns(backupConn, `ALTER TABLE table_with_enum_partkey ADD PARTITION purple VALUES ('purple');`)
  2099  			testhelper.AssertQueryRuns(backupConn, `INSERT INTO table_with_enum_partkey VALUES (17, 'purple'), (18, 'purple'), (19, 'purple'), (20, 'purple');`)
  2100  
  2101  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE table_with_enum_partkey CASCADE")
  2102  
  2103  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2104  			timestamp := getBackupTimestamp(string(output))
  2105  
  2106  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  2107  				"--redirect-db", "restoredb",
  2108  				"--backup-dir", backupDir)
  2109  			assertDataRestored(restoreConn, map[string]int{
  2110  				"table_with_enum_partkey": 20})
  2111  		})
  2112  		It("Restores table data partitioned using GPDB7 partition syntax", func() {
  2113  			// This test is borrowed from pg_dump
  2114  			testutils.SkipIfBefore7(backupConn)
  2115  			testhelper.AssertQueryRuns(backupConn, `create type digit as enum ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9');`)
  2116  			// non-troublesome hashed partitioning
  2117  			testhelper.AssertQueryRuns(backupConn, `create table tplain (en digit, data int unique);
  2118  																							insert into tplain select (x%10)::text::digit, x from generate_series(1,1000) x;
  2119  																							create table ths (mod int, data int, unique(mod, data)) partition by hash(mod);
  2120  																							create table ths_p1 partition of ths for values with (modulus 3, remainder 0);
  2121  																							create table ths_p2 partition of ths for values with (modulus 3, remainder 1);
  2122  																							create table ths_p3 partition of ths for values with (modulus 3, remainder 2);
  2123  																							insert into ths select (x%10), x from generate_series(1,1000) x;`)
  2124  			// dangerous hashed partitioning
  2125  			testhelper.AssertQueryRuns(backupConn, `create table tht (en digit, data int, unique(en, data)) partition by hash(en);
  2126  																							create table tht_p1 partition of tht for values with (modulus 3, remainder 0);
  2127  																							create table tht_p2 partition of tht for values with (modulus 3, remainder 1);
  2128  																							create table tht_p3 partition of tht for values with (modulus 3, remainder 2);
  2129  																							insert into tht select (x%10)::text::digit, x from generate_series(1,1000) x;`)
  2130  
  2131  			defer testhelper.AssertQueryRuns(backupConn, "DROP TYPE digit")
  2132  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE tplain")
  2133  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE ths")
  2134  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE tht")
  2135  
  2136  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2137  			timestamp := getBackupTimestamp(string(output))
  2138  
  2139  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  2140  				"--redirect-db", "restoredb",
  2141  				"--backup-dir", backupDir)
  2142  			assertDataRestored(restoreConn, map[string]int{
  2143  				"tplain": 1000, "ths": 1000, "tht": 1000})
  2144  		})
  2145  	})
  2146  	Describe("Restore to a different-sized cluster", func() {
  2147  		if useOldBackupVersion {
  2148  			Skip("This test is not needed for old backup versions")
  2149  		}
  2150  		// The backups for these tests were taken on GPDB version 6.20.3+dev.4.g9a08259bd1 build dev.
  2151  		BeforeEach(func() {
  2152  			testutils.SkipIfBefore6(backupConn)
  2153  			testhelper.AssertQueryRuns(backupConn, "CREATE ROLE testrole;")
  2154  		})
  2155  		AfterEach(func() {
  2156  			testhelper.AssertQueryRuns(restoreConn, fmt.Sprintf("REASSIGN OWNED BY testrole TO %s;", backupConn.User))
  2157  			testhelper.AssertQueryRuns(restoreConn, "DROP ROLE testrole;")
  2158  		})
  2159  		DescribeTable("",
  2160  			func(fullTimestamp string, incrementalTimestamp string, tarBaseName string, isIncrementalRestore bool, isFilteredRestore bool, isSingleDataFileRestore bool, testUsesPlugin bool) {
  2161  				if isSingleDataFileRestore && segmentCount != 3 {
  2162  					Skip("Single data file resize restores currently require a 3-segment cluster to test.")
  2163  				}
  2164  
  2165  				ddboostConfigPath := "/home/gpadmin/ddboost_config_replication.yaml"
  2166  				if testUsesPlugin {
  2167  					// For plugin-specific tests, assume that if we can find the ddboost configuration file
  2168  					// that we're on a CI system and should run them, otherwise skip them.
  2169  					if !utils.FileExists(ddboostConfigPath) {
  2170  						Skip("Plugin-specific tests require a configured plugin to be present in order to run.")
  2171  					}
  2172  				}
  2173  
  2174  				extractDirectory := extractSavedTarFile(backupDir, tarBaseName)
  2175  				defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemaone CASCADE;`)
  2176  				defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schematwo CASCADE;`)
  2177  				defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemathree CASCADE;`)
  2178  
  2179  				if !testUsesPlugin { // No need to manually move files when using a plugin
  2180  					isMultiNode := (backupCluster.GetHostForContent(0) != backupCluster.GetHostForContent(-1))
  2181  					moveSegmentBackupFiles(tarBaseName, extractDirectory, isMultiNode, fullTimestamp, incrementalTimestamp)
  2182  				}
  2183  
  2184  				gprestoreArgs := []string{
  2185  					"--redirect-db", "restoredb",
  2186  					"--backup-dir", extractDirectory,
  2187  					"--resize-cluster",
  2188  					"--on-error-continue"}
  2189  				if isFilteredRestore {
  2190  					gprestoreArgs = append(gprestoreArgs, "--include-schema", "schematwo")
  2191  				}
  2192  				gprestore(gprestorePath, restoreHelperPath, fullTimestamp, gprestoreArgs...)
  2193  
  2194  				// check row counts
  2195  				testutils.ExecuteSQLFile(restoreConn, "resources/test_rowcount_ddl.sql")
  2196  				rowcountsFilename := fmt.Sprintf("/tmp/%s-rowcounts.txt", tarBaseName)
  2197  				defer os.Remove(rowcountsFilename)
  2198  				_ = exec.Command("psql",
  2199  					"-d", "restoredb",
  2200  					"-c", "select * from cnt_rows();",
  2201  					"-o", rowcountsFilename).Run()
  2202  				expectedRowMap := unMarshalRowCounts(fmt.Sprintf("resources/%d-segment-db-rowcounts.txt", segmentCount))
  2203  				actualRowMap := unMarshalRowCounts(rowcountsFilename)
  2204  				for key := range expectedRowMap {
  2205  					if strings.HasPrefix(key, "schemathree") {
  2206  						delete(expectedRowMap, key)
  2207  					} else if isFilteredRestore && !strings.HasPrefix(key, "schematwo") {
  2208  						delete(expectedRowMap, key)
  2209  					}
  2210  				}
  2211  				if !reflect.DeepEqual(expectedRowMap, actualRowMap) {
  2212  					Fail(fmt.Sprintf("Expected row count map for full restore\n\n\t%v\n\nto equal\n\n\t%v\n\n", actualRowMap, expectedRowMap))
  2213  				}
  2214  
  2215  				if isIncrementalRestore {
  2216  					// restore subsequent incremental backup
  2217  					gprestore(gprestorePath, restoreHelperPath, incrementalTimestamp,
  2218  						"--redirect-db", "restoredb",
  2219  						"--incremental",
  2220  						"--data-only",
  2221  						"--backup-dir", extractDirectory,
  2222  						"--resize-cluster",
  2223  						"--on-error-continue")
  2224  
  2225  					// check row counts
  2226  					_ = exec.Command("psql",
  2227  						"-d", "restoredb",
  2228  						"-c", "select * from cnt_rows();",
  2229  						"-o", rowcountsFilename).Run()
  2230  					expectedIncrRowMap := unMarshalRowCounts(fmt.Sprintf("resources/%d-segment-db-incremental-rowcounts.txt", segmentCount))
  2231  					actualIncrRowMap := unMarshalRowCounts(rowcountsFilename)
  2232  
  2233  					if !reflect.DeepEqual(expectedIncrRowMap, actualIncrRowMap) {
  2234  						Fail(fmt.Sprintf("Expected row count map for incremental restore\n%v\nto equal\n%v\n", actualIncrRowMap, expectedIncrRowMap))
  2235  					}
  2236  				}
  2237  			},
  2238  			Entry("Can backup a 9-segment cluster and restore to current cluster", "20220909090738", "", "9-segment-db", false, false, false, false),
  2239  			Entry("Can backup a 9-segment cluster and restore to current cluster with single data file", "20220909090827", "", "9-segment-db-single-data-file", false, false, true, false),
  2240  			Entry("Can backup a 9-segment cluster and restore to current cluster with incremental backups", "20220909150254", "20220909150353", "9-segment-db-incremental", true, false, false, false),
  2241  
  2242  			Entry("Can backup a 7-segment cluster and restore to current cluster", "20220908145504", "", "7-segment-db", false, false, false, false),
  2243  			Entry("Can backup a 7-segment cluster and restore to current cluster single data file", "20220912101931", "", "7-segment-db-single-data-file", false, false, true, false),
  2244  			Entry("Can backup a 7-segment cluster and restore to current cluster with a filter", "20220908145645", "", "7-segment-db-filter", false, true, false, false),
  2245  			Entry("Can backup a 7-segment cluster and restore to current cluster with single data file and filter", "20220912102413", "", "7-segment-db-single-data-file-filter", false, true, true, false),
  2246  			Entry("Can backup a 2-segment cluster and restore to current cluster single data file and filter", "20220908150223", "", "2-segment-db-single-data-file-filter", false, true, true, false),
  2247  			Entry("Can backup a 2-segment cluster and restore to current cluster single data file", "20220908150159", "", "2-segment-db-single-data-file", false, false, true, false),
  2248  			Entry("Can backup a 2-segment cluster and restore to current cluster with filter", "20220908150238", "", "2-segment-db-filter", false, true, false, false),
  2249  			Entry("Can backup a 2-segment cluster and restore to current cluster with incremental backups and a single data file", "20220909150612", "20220909150622", "2-segment-db-incremental", true, false, false, false),
  2250  			Entry("Can backup a 1-segment cluster and restore to current cluster", "20220908150735", "", "1-segment-db", false, false, false, false),
  2251  			Entry("Can backup a 1-segment cluster and restore to current cluster with single data file", "20220908150752", "", "1-segment-db-single-data-file", false, false, true, false),
  2252  			Entry("Can backup a 1-segment cluster and restore to current cluster with a filter", "20220908150804", "", "1-segment-db-filter", false, true, false, false),
  2253  			Entry("Can backup a 3-segment cluster and restore to current cluster", "20220909094828", "", "3-segment-db", false, false, false, false),
  2254  
  2255  			Entry("Can backup a 2-segment using gpbackup 1.26.0 and restore to current cluster", "20230516032007", "", "2-segment-db-1_26_0", false, false, false, false),
  2256  
  2257  			// These tests will only run in CI, to avoid requiring developers to configure a plugin locally.
  2258  			// We don't do as many combinatoric tests for resize restores using plugins, partly for storage space reasons and partly because
  2259  			// we assume that if all of the above resize restores work and basic plugin restores work then the intersection should also work.
  2260  			Entry("Can perform a backup and full restore of a 7-segment cluster using a plugin", "20220912101931", "", "7-segment-db-single-data-file", false, false, true, true),
  2261  			Entry("Can perform a backup and full restore of a 2-segment cluster using a plugin", "20220908150159", "", "2-segment-db-single-data-file", false, false, true, true),
  2262  			Entry("Can perform a backup and incremental restore of a 2-segment cluster using a plugin", "20220909150612", "20220909150622", "2-segment-db-incremental", true, false, false, true),
  2263  		)
  2264  		It("will not restore a pre-1.26.0 backup that lacks a stored SegmentCount value", func() {
  2265  			extractDirectory := extractSavedTarFile(backupDir, "2-segment-db-1_24_0")
  2266  
  2267  			gprestoreCmd := exec.Command(gprestorePath,
  2268  				"--timestamp", "20230516021751",
  2269  				"--redirect-db", "restoredb",
  2270  				"--backup-dir", extractDirectory,
  2271  				"--resize-cluster",
  2272  				"--on-error-continue",
  2273  				"--verbose")
  2274  			output, err := gprestoreCmd.CombinedOutput()
  2275  			Expect(err).To(HaveOccurred())
  2276  			Expect(string(output)).To(ContainSubstring("Segment count for backup with timestamp 20230516021751 is unknown, cannot restore using --resize-cluster flag."))
  2277  		})
  2278  
  2279  		Describe("Restore from various-sized clusters with a replicated table", func() {
  2280  			if useOldBackupVersion {
  2281  				Skip("This test is not needed for old backup versions")
  2282  			}
  2283  			// The backups for these tests were taken on GPDB version 6.20.3+dev.4.g9a08259bd1 build dev.
  2284  			DescribeTable("",
  2285  				func(fullTimestamp string, tarBaseName string) {
  2286  
  2287  					testutils.SkipIfBefore6(backupConn)
  2288  					if useOldBackupVersion {
  2289  						Skip("Resize-cluster was only added in version 1.26")
  2290  					}
  2291  					extractDirectory := extractSavedTarFile(backupDir, tarBaseName)
  2292  					defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemaone CASCADE;`)
  2293  
  2294  					isMultiNode := (backupCluster.GetHostForContent(0) != backupCluster.GetHostForContent(-1))
  2295  					moveSegmentBackupFiles(tarBaseName, extractDirectory, isMultiNode, fullTimestamp)
  2296  
  2297  					gprestore(gprestorePath, restoreHelperPath, fullTimestamp,
  2298  						"--redirect-db", "restoredb",
  2299  						"--backup-dir", extractDirectory,
  2300  						"--resize-cluster",
  2301  						"--on-error-continue")
  2302  
  2303  					// check row counts on each segment and on coordinator, expecting 1 table with 100 rows, replicated across all
  2304  					for _, seg := range backupCluster.Segments {
  2305  						if seg.ContentID != -1 {
  2306  							assertSegmentDataRestored(seg.ContentID, "schemaone.test_table", 100)
  2307  						}
  2308  					}
  2309  					assertDataRestored(restoreConn, map[string]int{
  2310  						"schemaone.test_table": 100,
  2311  					})
  2312  
  2313  					// check check gp_distribution_policy at end of test to ensure it's set to destSize
  2314  					numSegments := dbconn.MustSelectString(restoreConn, "SELECT numsegments FROM gp_distribution_policy where localoid = 'schemaone.test_table'::regclass::oid")
  2315  					Expect(numSegments).To(Equal(strconv.Itoa(segmentCount)))
  2316  
  2317  				},
  2318  				Entry("Can backup a 1-segment cluster and restore to current cluster with replicated tables", "20221104023842", "1-segment-db-replicated"),
  2319  				Entry("Can backup a 3-segment cluster and restore to current cluster with replicated tables", "20221104023611", "3-segment-db-replicated"),
  2320  				Entry("Can backup a 9-segment cluster and restore to current cluster with replicated tables", "20221104025347", "9-segment-db-replicated"),
  2321  			)
  2322  		})
  2323  
  2324  		It("Will not restore to a different-size cluster if the SegmentCount of the backup is unknown", func() {
  2325  			if useOldBackupVersion {
  2326  				Skip("This test is not needed for old backup versions")
  2327  			}
  2328  			// This backup set is identical to the 5-segment-db-tar.gz backup set, except that the
  2329  			// segmentcount parameter was removed from the config file in the coordinator data directory.
  2330  			command := exec.Command("tar", "-xzf", "resources/no-segment-count-db.tar.gz", "-C", backupDir)
  2331  			mustRunCommand(command)
  2332  
  2333  			gprestoreCmd := exec.Command(gprestorePath,
  2334  				"--timestamp", "20220415160842",
  2335  				"--redirect-db", "restoredb",
  2336  				"--backup-dir", path.Join(backupDir, "5-segment-db"),
  2337  				"--resize-cluster",
  2338  				"--on-error-continue",
  2339  				"--verbose")
  2340  			output, err := gprestoreCmd.CombinedOutput()
  2341  			Expect(err).To(HaveOccurred())
  2342  			Expect(string(output)).To(MatchRegexp("Segment count for backup with timestamp [0-9]+ is unknown, cannot restore using --resize-cluster flag"))
  2343  		})
  2344  		It("Will not restore to a different-size cluster without the approprate flag", func() {
  2345  			command := exec.Command("tar", "-xzf", "resources/5-segment-db.tar.gz", "-C", backupDir)
  2346  			mustRunCommand(command)
  2347  
  2348  			gprestoreCmd := exec.Command(gprestorePath,
  2349  				"--timestamp", "20220415160842",
  2350  				"--redirect-db", "restoredb",
  2351  				"--backup-dir", path.Join(backupDir, "5-segment-db"),
  2352  				"--on-error-continue",
  2353  				"--verbose")
  2354  			output, err := gprestoreCmd.CombinedOutput()
  2355  			Expect(err).To(HaveOccurred())
  2356  			Expect(string(output)).To(ContainSubstring(fmt.Sprintf("Cannot restore a backup taken on a cluster with 5 segments to a cluster with %d segments unless the --resize-cluster flag is used.", segmentCount)))
  2357  		})
  2358  		It("Can backup 7-segment cluster and continue to resize restore to current cluster if table already exists with incompatible column type", func() {
  2359  			if segmentCount != 3 {
  2360  				Skip("Single data file resize restores currently require a 3-segment cluster to test.")
  2361  			}
  2362  
  2363  			fullTimestamp := "20220912101931"
  2364  			tarBaseName := "7-segment-db-single-data-file"
  2365  			extractDirectory := extractSavedTarFile(backupDir, tarBaseName)
  2366  			isMultiNode := (backupCluster.GetHostForContent(0) != backupCluster.GetHostForContent(-1))
  2367  			moveSegmentBackupFiles(tarBaseName, extractDirectory, isMultiNode, fullTimestamp)
  2368  
  2369  			testhelper.AssertQueryRuns(restoreConn, "CREATE SCHEMA schemaone;")
  2370  			testhelper.AssertQueryRuns(restoreConn, "CREATE TABLE schemaone.tabletwo(i date, t text);")
  2371  			defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemaone CASCADE;`)
  2372  			defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schematwo CASCADE;`)
  2373  			defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemathree CASCADE;`)
  2374  
  2375  			gprestoreCmd := exec.Command(gprestorePath,
  2376  				"--timestamp", fullTimestamp,
  2377  				"--redirect-db", "restoredb",
  2378  				"--backup-dir", path.Join(backupDir, tarBaseName),
  2379  				"--resize-cluster",
  2380  				"--on-error-continue",
  2381  				"--verbose")
  2382  			output, err := gprestoreCmd.CombinedOutput()
  2383  			Expect(err).To(HaveOccurred())
  2384  			Expect(string(output)).To(ContainSubstring("Encountered 1 errors during metadata restore"))
  2385  			Expect(string(output)).To(ContainSubstring("Error loading data into table schemaone.tabletwo"))
  2386  			Expect(string(output)).To(ContainSubstring("Encountered 1 error(s) during table data restore"))
  2387  			Expect(string(output)).To(ContainSubstring("Data restore completed with failures"))
  2388  		})
  2389  	})
  2390  	Describe("Restore indexes and constraints on exchanged partition tables", func() {
  2391  		BeforeEach(func() {
  2392  			testutils.SkipIfBefore6(backupConn)
  2393  			testhelper.AssertQueryRuns(backupConn, `
  2394                      CREATE SCHEMA schemaone;
  2395                      CREATE TABLE schemaone.part_table_for_upgrade (a INT, b INT) DISTRIBUTED BY (b) PARTITION BY RANGE(b) (PARTITION alpha  END (3), PARTITION beta START (3));
  2396  					CREATE INDEX upgrade_idx1 ON schemaone.part_table_for_upgrade(a) WHERE b > 10;
  2397  					ALTER TABLE schemaone.part_table_for_upgrade ADD PRIMARY KEY(a, b);
  2398  
  2399  					CREATE TABLE schemaone.like_table (like schemaone.part_table_for_upgrade INCLUDING CONSTRAINTS INCLUDING INDEXES) DISTRIBUTED BY (b);
  2400                      ALTER TABLE schemaone.part_table_for_upgrade EXCHANGE PARTITION beta WITH TABLE schemaone.like_table;`)
  2401  		})
  2402  		AfterEach(func() {
  2403  			testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA schemaone CASCADE;")
  2404  			testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA schemaone CASCADE;")
  2405  		})
  2406  
  2407  		It("Automatically updates index names correctly", func() {
  2408  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2409  			timestamp := getBackupTimestamp(string(output))
  2410  
  2411  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  2412  				"--redirect-db", "restoredb",
  2413  				"--backup-dir", backupDir)
  2414  
  2415  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
  2416  
  2417  			// Indexes do not need to be renamed on partition exchange in GPDB7+ due to new syntax.
  2418  			expectedValue := false
  2419  			indexSuffix := "idx"
  2420  			if backupConn.Version.Is("6") {
  2421  				// In GPDB6, indexes are automatically cascaded down and so in exchange case they must be renamed to avoid name collision breaking restore
  2422  				expectedValue = true
  2423  			}
  2424  			Expect(strings.Contains(string(metadataFileContents), fmt.Sprintf("CREATE INDEX like_table_a_%s ON schemaone.like_table USING btree (a) WHERE (b > 10);",
  2425  				indexSuffix))).To(Equal(expectedValue))
  2426  			Expect(strings.Contains(string(metadataFileContents), fmt.Sprintf("CREATE INDEX part_table_for_upgrade_1_prt_beta_a_%s ON schemaone.like_table USING btree (a) WHERE (b > 10);",
  2427  				indexSuffix))).ToNot(Equal(expectedValue))
  2428  		})
  2429  
  2430  		It("Automatically updates constraint names correctly", func() {
  2431  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2432  			timestamp := getBackupTimestamp(string(output))
  2433  
  2434  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  2435  				"--redirect-db", "restoredb",
  2436  				"--backup-dir", backupDir)
  2437  
  2438  			// assert constraint names are what we expect
  2439  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
  2440  			Expect(strings.Contains(string(metadataFileContents), "ALTER TABLE ONLY schemaone.like_table ADD CONSTRAINT like_table_pkey PRIMARY KEY (a, b);")).To(BeTrue())
  2441  			Expect(strings.Contains(string(metadataFileContents), "ALTER TABLE ONLY schemaone.like_table ADD CONSTRAINT part_table_for_upgrade_pkey PRIMARY KEY (a, b);")).ToNot(BeTrue())
  2442  
  2443  		})
  2444  	})
  2445  	Describe("Backup and restore external partitions", func() {
  2446  		It("Will correctly handle external partitions on multiple versions of GPDB", func() {
  2447  			testutils.SkipIfBefore6(backupConn)
  2448  			testhelper.AssertQueryRuns(backupConn, "CREATE SCHEMA testchema;")
  2449  			defer testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA IF EXISTS testchema CASCADE;")
  2450  			defer testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA IF EXISTS testchema CASCADE;")
  2451  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE testchema.multipartition (a int,b date,c text,d int)
  2452                     DISTRIBUTED BY (a)
  2453                     PARTITION BY RANGE (b)
  2454                     SUBPARTITION BY LIST (c)
  2455                     SUBPARTITION TEMPLATE
  2456                     (SUBPARTITION usa values ('usa'),
  2457                     SUBPARTITION apj values ('apj'),
  2458                     SUBPARTITION eur values ('eur'))
  2459                     (PARTITION Jan16 START (date '2016-01-01') INCLUSIVE ,
  2460                       PARTITION Feb16 START (date '2016-02-01') INCLUSIVE ,
  2461                       PARTITION Mar16 START (date '2016-03-01') INCLUSIVE ,
  2462                       PARTITION Apr16 START (date '2016-04-01') INCLUSIVE ,
  2463                       PARTITION May16 START (date '2016-05-01') INCLUSIVE ,
  2464                       PARTITION Jun16 START (date '2016-06-01') INCLUSIVE ,
  2465                       PARTITION Jul16 START (date '2016-07-01') INCLUSIVE ,
  2466                       PARTITION Aug16 START (date '2016-08-01') INCLUSIVE ,
  2467                       PARTITION Sep16 START (date '2016-09-01') INCLUSIVE ,
  2468                       PARTITION Oct16 START (date '2016-10-01') INCLUSIVE ,
  2469                       PARTITION Nov16 START (date '2016-11-01') INCLUSIVE ,
  2470                       PARTITION Dec16 START (date '2016-12-01') INCLUSIVE
  2471                                       END (date '2017-01-01') EXCLUSIVE);
  2472                     CREATE EXTERNAL TABLE testchema.external_apj (a INT,b DATE,c TEXT,d INT) LOCATION ('gpfdist://127.0.0.1/apj') format 'text';
  2473                     ALTER TABLE testchema.multipartition ALTER PARTITION Dec16 EXCHANGE PARTITION apj WITH TABLE testchema.external_apj WITHOUT VALIDATION;
  2474                     `)
  2475  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  2476  			timestamp := getBackupTimestamp(string(output))
  2477  
  2478  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
  2479  			Expect(metadataFileContents).ToNot(BeEmpty())
  2480  
  2481  			if backupConn.Version.AtLeast("7") {
  2482  				//GPDB7+ has new "attach table" partition syntax, does not require exchanging for external partitions
  2483  				Expect(string(metadataFileContents)).To(ContainSubstring("CREATE READABLE EXTERNAL TABLE testchema.multipartition_1_prt_dec16_2_prt_apj ("))
  2484  				Expect(string(metadataFileContents)).To(ContainSubstring("ALTER TABLE ONLY testchema.multipartition_1_prt_dec16 ATTACH PARTITION testchema.multipartition_1_prt_dec16_2_prt_apj FOR VALUES IN ('apj');"))
  2485  			} else {
  2486  				// GPDB5/6 use legacy GPDB syntax, and need an exchange to have an external partition
  2487  				Expect(string(metadataFileContents)).To(ContainSubstring("CREATE READABLE EXTERNAL TABLE testchema.multipartition_1_prt_dec16_2_prt_apj_ext_part_ ("))
  2488  				Expect(string(metadataFileContents)).To(ContainSubstring("ALTER TABLE testchema.multipartition ALTER PARTITION dec16 EXCHANGE PARTITION apj WITH TABLE testchema.multipartition_1_prt_dec16_2_prt_apj_ext_part_ WITHOUT VALIDATION;"))
  2489  			}
  2490  
  2491  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  2492  				"--redirect-db", "restoredb",
  2493  				"--backup-dir", backupDir)
  2494  		})
  2495  	})
  2496  	Describe("Backup and restore multi-layer leaf-partition backups filtered to parent or child tables with intermediate partitions on GPDB7+", func() {
  2497  		BeforeEach(func() {
  2498  			testutils.SkipIfBefore7(backupConn)
  2499  			testhelper.AssertQueryRuns(backupConn, "CREATE SCHEMA schemaone;")
  2500  			// load up two tables with some test data each, to confirm that only one is backed up and restored
  2501  			testhelper.AssertQueryRuns(backupConn, `
  2502                        DROP TABLE IF EXISTS schemaone.measurement CASCADE;
  2503                        CREATE TABLE schemaone.measurement (
  2504                            city_id         int not null,
  2505                            logdate         date not null,
  2506                            peaktemp        int,
  2507                            unitsales       int default 42
  2508                        ) PARTITION BY RANGE (logdate);
  2509  
  2510                        ALTER TABLE schemaone.measurement ADD CONSTRAINT parent_city_id_unique UNIQUE (city_id, logdate, peaktemp, unitsales);
  2511  
  2512                        CREATE TABLE schemaone.measurement_y2006m02 PARTITION OF schemaone.measurement
  2513                            FOR VALUES FROM ('2006-02-01') TO ('2006-03-01')
  2514                            PARTITION BY RANGE (peaktemp);
  2515  
  2516                        ALTER TABLE schemaone.measurement_y2006m02 ADD CONSTRAINT intermediate_check CHECK (peaktemp < 1000);
  2517  
  2518                        CREATE TABLE schemaone.measurement_peaktemp_0_100 PARTITION OF schemaone.measurement_y2006m02
  2519                            FOR VALUES FROM (0) TO (100)
  2520                            PARTITION BY RANGE (unitsales);
  2521  
  2522                        CREATE TABLE schemaone.measurement_peaktemp_catchall PARTITION OF schemaone.measurement_peaktemp_0_100
  2523                            FOR VALUES FROM (1) TO (100);
  2524  
  2525                        CREATE TABLE schemaone.measurement_default PARTITION OF schemaone.measurement_y2006m02 DEFAULT;
  2526  
  2527                        CREATE TABLE schemaone.measurement_y2006m03 PARTITION OF schemaone.measurement
  2528                            FOR VALUES FROM ('2006-03-01') TO ('2006-04-01');
  2529  
  2530                        CREATE TABLE schemaone.measurement_y2007m11 PARTITION OF schemaone.measurement
  2531                            FOR VALUES FROM ('2007-11-01') TO ('2007-12-01');
  2532  
  2533                        CREATE TABLE schemaone.measurement_y2007m12 PARTITION OF schemaone.measurement
  2534                            FOR VALUES FROM ('2007-12-01') TO ('2008-01-01');
  2535  
  2536                        CREATE TABLE schemaone.measurement_y2008m01 PARTITION OF schemaone.measurement
  2537                            FOR VALUES FROM ('2008-01-01') TO ('2008-02-01');
  2538  
  2539                        ALTER TABLE schemaone.measurement_y2008m01 ADD CONSTRAINT city_id_unique UNIQUE (city_id);
  2540  
  2541                        INSERT INTO schemaone.measurement VALUES (42, '2006-02-22', 75, 80);
  2542                        INSERT INTO schemaone.measurement VALUES (42, '2006-03-05', 75, 80);
  2543                        INSERT INTO schemaone.measurement VALUES (42, '2007-12-22', 75, 80);
  2544                        INSERT INTO schemaone.measurement VALUES (42, '2007-12-20', 75, 80);
  2545                        INSERT INTO schemaone.measurement VALUES (42, '2007-11-20', 75, 80);
  2546                        INSERT INTO schemaone.measurement VALUES (42, '2006-02-01', 75, 99);
  2547                        INSERT INTO schemaone.measurement VALUES (42, '2006-02-22', 75, 60);
  2548                        INSERT INTO schemaone.measurement VALUES (42, '2007-11-15', 75, 80);
  2549                     `)
  2550  			defer testhelper.AssertQueryRuns(backupConn, "")
  2551  		})
  2552  
  2553  		AfterEach(func() {
  2554  			testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA IF EXISTS schemaone CASCADE;")
  2555  			testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA IF EXISTS schemaone CASCADE;")
  2556  		})
  2557  		DescribeTable("",
  2558  			func(includeTableName string, secondaryIncludeTableName string, expectedTableCount string, expectedRootRowCount string, expectedLeafRowCount string) {
  2559  				var output []byte
  2560  				if secondaryIncludeTableName != "" {
  2561  					output = gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data",
  2562  						"--include-table", includeTableName,
  2563  						"--include-table", secondaryIncludeTableName)
  2564  				} else {
  2565  					output = gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data", "--include-table", includeTableName)
  2566  				}
  2567  				testhelper.AssertQueryRuns(restoreConn, "CREATE SCHEMA schemaone;")
  2568  				timestamp := getBackupTimestamp(string(output))
  2569  
  2570  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  2571  					"--redirect-db", "restoredb",
  2572  					"--backup-dir", backupDir)
  2573  
  2574  				tableCount := dbconn.MustSelectString(restoreConn, "SELECT count(*) FROM information_schema.tables where table_schema = 'schemaone';")
  2575  				Expect(tableCount).To(Equal(expectedTableCount))
  2576  
  2577  				rootRowCount := dbconn.MustSelectString(restoreConn, "SELECT count(*) FROM schemaone.measurement;")
  2578  				Expect(rootRowCount).To(Equal(expectedRootRowCount))
  2579  
  2580  				leafRowCount := dbconn.MustSelectString(restoreConn, "SELECT count(*) FROM schemaone.measurement_peaktemp_catchall;")
  2581  				Expect(leafRowCount).To(Equal(expectedLeafRowCount))
  2582  			},
  2583  			Entry("Will correctly handle filtering on child table", "schemaone.measurement_peaktemp_catchall", "", "4", "3", "3"),
  2584  			Entry("Will correctly handle filtering on child table", "schemaone.measurement", "", "9", "8", "3"),
  2585  			Entry("Will correctly handle filtering on child table", "schemaone.measurement", "schemaone.measurement_peaktemp_catchall", "9", "8", "3"),
  2586  		)
  2587  	})
  2588  	Describe("Concurrent backups will only work if given unique backup directories and the flags: metadata-only, backup-dir, and no-history", func() {
  2589  		var backupDir1 string
  2590  		var backupDir2 string
  2591  		var backupDir3 string
  2592  		BeforeEach(func() {
  2593  			backupDir1 = path.Join(backupDir, "conc_test1")
  2594  			backupDir2 = path.Join(backupDir, "conc_test2")
  2595  			backupDir3 = path.Join(backupDir, "conc_test3")
  2596  			os.Mkdir(backupDir1, 0777)
  2597  			os.Mkdir(backupDir2, 0777)
  2598  			os.Mkdir(backupDir3, 0777)
  2599  		})
  2600  		AfterEach(func() {
  2601  			os.RemoveAll(backupDir1)
  2602  			os.RemoveAll(backupDir2)
  2603  			os.RemoveAll(backupDir3)
  2604  		})
  2605  		It("backs up successfully with the correct flags", func() {
  2606  			// --no-history flag was added in 1.28.0
  2607  			skipIfOldBackupVersionBefore("1.28.0")
  2608  			command1 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1, "--no-history", "--metadata-only")
  2609  			command2 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir2, "--no-history", "--metadata-only")
  2610  			command3 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir3, "--no-history", "--metadata-only")
  2611  			commands := []*exec.Cmd{command1, command2, command3}
  2612  
  2613  			var backWg sync.WaitGroup
  2614  			errchan := make(chan error, len(commands))
  2615  			for _, cmd := range commands {
  2616  				backWg.Add(1)
  2617  				go func(command *exec.Cmd) {
  2618  					defer backWg.Done()
  2619  					_, err := command.CombinedOutput()
  2620  					errchan <- err
  2621  				}(cmd)
  2622  			}
  2623  			backWg.Wait()
  2624  			close(errchan)
  2625  
  2626  			for err := range errchan {
  2627  				Expect(err).ToNot(HaveOccurred())
  2628  			}
  2629  		})
  2630  		It("fails without the correct flags", func() {
  2631  			command1 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1)
  2632  			command2 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1)
  2633  			command3 := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir1)
  2634  			commands := []*exec.Cmd{command1, command2, command3}
  2635  
  2636  			var backWg sync.WaitGroup
  2637  			errchan := make(chan error, len(commands))
  2638  			for _, cmd := range commands {
  2639  				backWg.Add(1)
  2640  				go func(command *exec.Cmd) {
  2641  					defer backWg.Done()
  2642  					_, err := command.CombinedOutput()
  2643  					errchan <- err
  2644  				}(cmd)
  2645  			}
  2646  			backWg.Wait()
  2647  			close(errchan)
  2648  
  2649  			errcounter := 0
  2650  			for err := range errchan {
  2651  				if err != nil {
  2652  					errcounter++
  2653  				}
  2654  			}
  2655  			Expect(errcounter > 0).To(BeTrue())
  2656  		})
  2657  	})
  2658  	Describe("Filtered backups with --no-inherits", func() {
  2659  		It("will not include children or parents of included tables", func() {
  2660  			if useOldBackupVersion {
  2661  				Skip("This test is not needed for old backup versions")
  2662  			}
  2663  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE public.parent_one(one int);`)
  2664  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE public.parent_two(two int);`)
  2665  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE public.base() INHERITS (public.parent_one, public.parent_two);`)
  2666  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE public.child_one() INHERITS (public.base);`)
  2667  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE public.child_two() INHERITS (public.base);`)
  2668  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE public.unrelated(three int);`)
  2669  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE public.parent_one CASCADE")
  2670  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE public.parent_two CASCADE")
  2671  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE public.unrelated")
  2672  
  2673  			output := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--include-table", "public.base", "--no-inherits")
  2674  			timestamp := getBackupTimestamp(string(output))
  2675  
  2676  			contents := string(getMetdataFileContents(backupDir, timestamp, "metadata.sql"))
  2677  			Expect(contents).To(ContainSubstring("CREATE TABLE public.base"))
  2678  			Expect(contents).ToNot(ContainSubstring("CREATE TABLE public.parent_one"))
  2679  			Expect(contents).ToNot(ContainSubstring("CREATE TABLE public.parent_two"))
  2680  			Expect(contents).ToNot(ContainSubstring("CREATE TABLE public.child_one"))
  2681  			Expect(contents).ToNot(ContainSubstring("CREATE TABLE public.child_two"))
  2682  			Expect(contents).ToNot(ContainSubstring("CREATE TABLE public.unrelated"))
  2683  		})
  2684  	})
  2685  	Describe("Report files", func() {
  2686  		It("prints the correct end time in the report file", func() {
  2687  			testutils.SkipIfBefore7(backupConn)
  2688  			testhelper.AssertQueryRuns(backupConn, `CREATE SCHEMA testschema`)
  2689  			// We need enough tables for the backup to take multiple seconds, so create a bunch of them
  2690  			for i := 0; i < 100; i++ {
  2691  				testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`CREATE TABLE testschema.foo%d(i int)`, i))
  2692  				testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`INSERT INTO testschema.foo%d SELECT generate_series(1,10000)`, i))
  2693  			}
  2694  			defer testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA testschema CASCADE")
  2695  
  2696  			gpbackupCmd := exec.Command(gpbackupPath, "--dbname", "testdb", "--backup-dir", backupDir)
  2697  			out, err := gpbackupCmd.CombinedOutput()
  2698  			Expect(err).ToNot(HaveOccurred())
  2699  			output := string(out)
  2700  			timestampRegex := regexp.MustCompile(`Backup Timestamp = (\d{14})`)
  2701  			timestamp := timestampRegex.FindStringSubmatch(output)[1]
  2702  
  2703  			// Grab the printed timestamp from the last line of the output and the timestamp in the report file,
  2704  			// then convert the printed one into the same format as the report one for comparison
  2705  			lines := strings.Split(strings.TrimSpace(output), "\n")
  2706  			stdoutTimestamp := strings.Split(lines[len(lines)-1], " ")[0]
  2707  			stdoutTime, _ := time.ParseInLocation("20060102:15:04:05", stdoutTimestamp, time.Local)
  2708  			stdoutEndTime := stdoutTime.Format("Mon Jan 02 2006 15:04:05")
  2709  
  2710  			reportRegex := regexp.MustCompile(`end time: +(.+)\n`)
  2711  			contents := string(getMetdataFileContents(backupDir, timestamp, "report"))
  2712  			reportEndTime := reportRegex.FindStringSubmatch(contents)[1]
  2713  
  2714  			if stdoutEndTime != reportEndTime {
  2715  				// The times *should* be identical, but DoTeardown might be a second off, so we accept a 1-second difference
  2716  				marginTime := stdoutTime.Add(time.Second * -1)
  2717  				marginEndTime := marginTime.Format("Mon Jan 02 2006 15:04:05")
  2718  				if marginEndTime != reportEndTime {
  2719  					Fail(fmt.Sprintf("Expected printed timestamp %s to match timestamp %s in report file", stdoutEndTime, reportEndTime))
  2720  				}
  2721  			}
  2722  		})
  2723  	})
  2724  	Describe("Running gprestore without the --timestamp flag", func() {
  2725  		BeforeEach(func() {
  2726  			// All of the gpbackup calls below use --metadata-only, so there's nothing to clean up on the segments
  2727  			os.RemoveAll(fmt.Sprintf("%s/backups", backupCluster.GetDirForContent(-1)))
  2728  			os.RemoveAll("/tmp/no-timestamp-tests")
  2729  		})
  2730  		AfterEach(func() {
  2731  			os.RemoveAll("/tmp/no-timestamp-tests")
  2732  		})
  2733  
  2734  		It("throws an error if there is a single backup in the normal backup location", func() {
  2735  			gpbackup(gpbackupPath, backupHelperPath, "--verbose", "--metadata-only")
  2736  
  2737  			output, err := exec.Command(gprestorePath, "--verbose", "--redirect-db", "restoredb").CombinedOutput()
  2738  			Expect(err).To(HaveOccurred())
  2739  			Expect(string(output)).ToNot(ContainSubstring("Restore completed successfully"))
  2740  		})
  2741  		It("functions normally if there is a single backup in a user-provided backup directory", func() {
  2742  			gpbackup(gpbackupPath, backupHelperPath, "--verbose", "--metadata-only", "--backup-dir", "/tmp/no-timestamp-tests")
  2743  
  2744  			output, err := exec.Command(gprestorePath, "--verbose", "--redirect-db", "restoredb", "--backup-dir", "/tmp/no-timestamp-tests").CombinedOutput()
  2745  			Expect(err).ToNot(HaveOccurred())
  2746  			Expect(string(output)).To(ContainSubstring("Restore completed successfully"))
  2747  		})
  2748  		It("errors out if there are no backups in a user-provided backup directory", func() {
  2749  			output, err := exec.Command(gprestorePath, "--verbose", "--backup-dir", "/tmp/no-timestamp-tests").CombinedOutput()
  2750  			Expect(err).To(HaveOccurred())
  2751  			Expect(string(output)).To(ContainSubstring("No timestamp directories found"))
  2752  		})
  2753  		It("errors out if there are multiple backups in a user-provided backup directory", func() {
  2754  			gpbackup(gpbackupPath, backupHelperPath, "--verbose", "--metadata-only", "--backup-dir", "/tmp/no-timestamp-tests")
  2755  			gpbackup(gpbackupPath, backupHelperPath, "--verbose", "--metadata-only", "--backup-dir", "/tmp/no-timestamp-tests")
  2756  
  2757  			output, err := exec.Command(gprestorePath, "--verbose", "--backup-dir", "/tmp/no-timestamp-tests").CombinedOutput()
  2758  			Expect(err).To(HaveOccurred())
  2759  			Expect(string(output)).To(ContainSubstring("Multiple timestamp directories found"))
  2760  		})
  2761  	})
  2762  })