github.com/tuhaihe/gpbackup@v1.0.3/end_to_end/end_to_end_suite_test.go

     1  package end_to_end_test
     2  
     3  import (
     4  	"encoding/csv"
     5  	"flag"
     6  	"fmt"
     7  	"io/ioutil"
     8  	"os"
     9  	"os/exec"
    10  	path "path/filepath"
    11  	"reflect"
    12  	"regexp"
    13  	"sort"
    14  	"strconv"
    15  	"strings"
    16  	"testing"
    17  	"time"
    18  
    19  	"github.com/blang/semver"
    20  	"github.com/tuhaihe/gp-common-go-libs/cluster"
    21  	"github.com/tuhaihe/gp-common-go-libs/dbconn"
    22  	"github.com/tuhaihe/gp-common-go-libs/iohelper"
    23  	"github.com/tuhaihe/gp-common-go-libs/operating"
    24  	"github.com/tuhaihe/gp-common-go-libs/structmatcher"
    25  	"github.com/tuhaihe/gp-common-go-libs/testhelper"
    26  	"github.com/tuhaihe/gpbackup/backup"
    27  	"github.com/tuhaihe/gpbackup/filepath"
    28  	"github.com/tuhaihe/gpbackup/testutils"
    29  	"github.com/tuhaihe/gpbackup/toc"
    30  	"github.com/tuhaihe/gpbackup/utils"
    31  	"github.com/pkg/errors"
    32  	"github.com/spf13/pflag"
    33  
    34  	. "github.com/onsi/ginkgo/v2"
    35  	. "github.com/onsi/gomega"
    36  	"github.com/onsi/gomega/format"
    37  	. "github.com/onsi/gomega/gexec"
    38  
    39  	"gopkg.in/yaml.v2"
    40  )
    41  
    42  /* The backup directory must be unique per test. There is test flakiness
    43   * against Data Domain Boost mounted file systems due to how it handles
    44   * directory deletion/creation.
    45   */
    46  var (
    47  	customBackupDir string
    48  
    49  	useOldBackupVersion bool
    50  	oldBackupSemVer     semver.Version
    51  
    52  	backupCluster           *cluster.Cluster
    53  	historyFilePath         string
    54  	saveHistoryFilePath     = "/tmp/end_to_end_save_history_file.yaml"
    55  	testFailure             bool
    56  	backupConn              *dbconn.DBConn
    57  	restoreConn             *dbconn.DBConn
    58  	gpbackupPath            string
    59  	backupHelperPath        string
    60  	restoreHelperPath       string
    61  	gprestorePath           string
    62  	pluginConfigPath        string
    63  	publicSchemaTupleCounts map[string]int
    64  	schema2TupleCounts      map[string]int
    65  	backupDir               string
    66  	segmentCount            int
    67  )
    68  
    69  const (
    70  	TOTAL_RELATIONS               = 37
    71  	TOTAL_RELATIONS_AFTER_EXCLUDE = 21
    72  	TOTAL_CREATE_STATEMENTS       = 9
    73  )
    74  
     75  // This init function is run automatically by the Go runtime before any tests are run.
    76  func init() {
    77  	flag.StringVar(&customBackupDir, "custom_backup_dir", "/tmp",
    78  		"custom_backup_flag for testing against a configurable directory")
    79  }
    80  
    81  /* This function is a helper function to execute gpbackup and return a session
    82   * to allow checking its output.
    83   */
    84  func gpbackup(gpbackupPath string, backupHelperPath string, args ...string) string {
    85  	if useOldBackupVersion {
    86  		_ = os.Chdir("..")
    87  		command := exec.Command("make", "install", fmt.Sprintf("helper_path=%s", backupHelperPath))
    88  		mustRunCommand(command)
    89  		_ = os.Chdir("end_to_end")
    90  	}
    91  	args = append([]string{"--verbose", "--dbname", "testdb"}, args...)
    92  	command := exec.Command(gpbackupPath, args...)
    93  	output := mustRunCommand(command)
    94  	r := regexp.MustCompile(`Backup Timestamp = (\d{14})`)
     95  	return r.FindStringSubmatch(string(output))[1]
    96  }
    97  
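         /* This function is a helper function to execute gprestore and return its
          * combined output for checking.
          */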
    98  func gprestore(gprestorePath string, restoreHelperPath string, timestamp string, args ...string) []byte {
    99  	if useOldBackupVersion {
   100  		_ = os.Chdir("..")
   101  		command := exec.Command("make", "install",
   102  			fmt.Sprintf("helper_path=%s", restoreHelperPath))
   103  		mustRunCommand(command)
   104  		_ = os.Chdir("end_to_end")
   105  	}
   106  	args = append([]string{"--verbose", "--timestamp", timestamp}, args...)
   107  	command := exec.Command(gprestorePath, args...)
   108  	output := mustRunCommand(command)
   109  	return output
   110  }
   111  
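         // buildAndInstallBinaries builds gpbackup, gpbackup_helper, and gprestore from
         // the current checkout via `make build` and returns their paths under $HOME/go/bin.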
   112  func buildAndInstallBinaries() (string, string, string) {
   113  	_ = os.Chdir("..")
   114  	command := exec.Command("make", "build")
   115  	mustRunCommand(command)
   116  	_ = os.Chdir("end_to_end")
   117  	binDir := fmt.Sprintf("%s/go/bin", operating.System.Getenv("HOME"))
   118  	return fmt.Sprintf("%s/gpbackup", binDir), fmt.Sprintf("%s/gpbackup_helper", binDir), fmt.Sprintf("%s/gprestore", binDir)
   119  }
   120  
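         // buildOldBinaries checks out the given gpbackup version tag, builds gpbackup
         // and gpbackup_helper from it, then restores the original checkout.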
   121  func buildOldBinaries(version string) (string, string) {
   122  	_ = os.Chdir("..")
   123  	command := exec.Command("git", "checkout", version, "-f")
   124  	mustRunCommand(command)
   125  	command = exec.Command("dep", "ensure")
   126  	mustRunCommand(command)
   127  	gpbackupOldPath, err := Build("github.com/tuhaihe/gpbackup",
   128  		"-tags", "gpbackup", "-ldflags",
   129  		fmt.Sprintf("-X github.com/tuhaihe/gpbackup/backup.version=%s", version))
   130  	Expect(err).ShouldNot(HaveOccurred())
   131  	gpbackupHelperOldPath, err := Build("github.com/tuhaihe/gpbackup",
   132  		"-tags", "gpbackup_helper", "-ldflags",
   133  		fmt.Sprintf("-X github.com/tuhaihe/gpbackup/helper.version=%s", version))
   134  	Expect(err).ShouldNot(HaveOccurred())
   135  	command = exec.Command("git", "checkout", "-", "-f")
   136  	mustRunCommand(command)
   137  	command = exec.Command("dep", "ensure")
   138  	mustRunCommand(command)
   139  	_ = os.Chdir("end_to_end")
   140  	return gpbackupOldPath, gpbackupHelperOldPath
   141  }
   142  
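         // assertDataRestored fails the spec if any table's actual row count differs
         // from the expected count in tableToTupleCount.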
   143  func assertDataRestored(conn *dbconn.DBConn, tableToTupleCount map[string]int) {
   144  	for tableName, expectedNumTuples := range tableToTupleCount {
   145  		actualTupleCount := dbconn.MustSelectString(conn, fmt.Sprintf("SELECT count(*) AS string FROM %s", tableName))
   146  		if strconv.Itoa(expectedNumTuples) != actualTupleCount {
   147  			Fail(fmt.Sprintf("Expected:\n\t%s rows to have been restored into table %s\nActual:\n\t%s rows were restored", strconv.Itoa(expectedNumTuples), tableName, actualTupleCount))
   148  		}
   149  	}
   150  }
   151  
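         // unMarshalRowCounts parses a pipe-delimited psql result file into a map keyed
         // by "<schema>_<table>_<segment id>", skipping psql's header and footer lines.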
   152  func unMarshalRowCounts(filepath string) map[string]int {
   153  	rowFile, err := os.Open(filepath)
   154  
   155  	if err != nil {
   156  		Fail(fmt.Sprintf("Failed to open rowcount file: %s. Error: %s", filepath, err.Error()))
   157  	}
   158  	defer rowFile.Close()
   159  
   160  	reader := csv.NewReader(rowFile)
   161  	reader.Comma = '|'
   162  	reader.FieldsPerRecord = -1
   163  	rowData, err := reader.ReadAll()
   164  	if err != nil {
   165  		Fail(fmt.Sprintf("Failed to initialize rowcount reader: %s. Error: %s", filepath, err.Error()))
   166  	}
   167  
   168  	allRecords := make(map[string]int)
   169  	for idx, each := range rowData {
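         		// Skip the two header lines and the trailing row-count footer.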
   170  		if idx < 2 || idx == len(rowData)-1 {
   171  			continue
   172  		}
    173  		tableSchema := strings.TrimSpace(each[0])
    174  		tableName := strings.TrimSpace(each[1])
    175  		segID, _ := strconv.Atoi(strings.TrimSpace(each[2]))
    176  		rowCount, _ := strconv.Atoi(strings.TrimSpace(each[3]))
    177  
    178  		recordKey := fmt.Sprintf("%s_%s_%d", tableSchema, tableName, segID)
    179  		allRecords[recordKey] = rowCount
   180  	}
   181  
   182  	return allRecords
   183  }
   184  
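         // assertSegmentDataRestored connects directly to the segment with the given
         // content ID and verifies the table's row count on that segment.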
   185  func assertSegmentDataRestored(contentID int, tableName string, rows int) {
   186  	segment := backupCluster.ByContent[contentID]
   187  	port := segment[0].Port
   188  	segConn := testutils.SetupTestDBConnSegment("restoredb", port, backupConn.Version)
   189  	defer segConn.Close()
   190  	assertDataRestored(segConn, map[string]int{tableName: rows})
   191  }
   192  
   193  type PGClassStats struct {
   194  	Relpages  int
   195  	Reltuples float32
   196  }
   197  
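         // assertPGClassStatsRestored verifies that relpages and reltuples in pg_class
         // match between the backup and restore databases for each table.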
   198  func assertPGClassStatsRestored(backupConn *dbconn.DBConn, restoreConn *dbconn.DBConn, tableToTupleCount map[string]int) {
    199  	for tableName := range tableToTupleCount {
   200  		backupStats := make([]PGClassStats, 0)
   201  		restoreStats := make([]PGClassStats, 0)
   202  		pgClassStatsQuery := fmt.Sprintf("SELECT relpages, reltuples FROM pg_class WHERE oid='%s'::regclass::oid", tableName)
   203  		backupErr := backupConn.Select(&backupStats, pgClassStatsQuery)
   204  		restoreErr := restoreConn.Select(&restoreStats, pgClassStatsQuery)
   205  		if backupErr != nil {
   206  			Fail(fmt.Sprintf("Unable to get pg_class stats for table '%s' on the backup database", tableName))
   207  		} else if restoreErr != nil {
   208  			Fail(fmt.Sprintf("Unable to get pg_class stats for table '%s' on the restore database: %s", tableName, restoreErr))
   209  		}
   210  
    211  		if backupStats[0].Relpages != restoreStats[0].Relpages || backupStats[0].Reltuples != restoreStats[0].Reltuples {
   212  			Fail(fmt.Sprintf("The pg_class stats for table '%s' do not match: %v != %v", tableName, backupStats, restoreStats))
   213  		}
   214  	}
   215  }
   216  
   217  func assertSchemasExist(conn *dbconn.DBConn, expectedNumSchemas int) {
    218  	countQuery := `SELECT COUNT(n.nspname) FROM pg_catalog.pg_namespace n WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema';`
   219  	actualSchemaCount := dbconn.MustSelectString(conn, countQuery)
   220  	if strconv.Itoa(expectedNumSchemas) != actualSchemaCount {
   221  		Fail(fmt.Sprintf("Expected:\n\t%s schemas to exist in the DB\nActual:\n\t%s schemas are in the DB", strconv.Itoa(expectedNumSchemas), actualSchemaCount))
   222  	}
   223  }
   224  
   225  func assertRelationsCreated(conn *dbconn.DBConn, expectedNumTables int) {
   226  	countQuery := `SELECT count(*) AS string FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('S','v','r','p') AND n.nspname IN ('public', 'schema2');`
   227  	actualTableCount := dbconn.MustSelectString(conn, countQuery)
   228  	if strconv.Itoa(expectedNumTables) != actualTableCount {
   229  		Fail(fmt.Sprintf("Expected:\n\t%s relations to have been created\nActual:\n\t%s relations were created", strconv.Itoa(expectedNumTables), actualTableCount))
   230  	}
   231  }
   232  
   233  func assertRelationsCreatedInSchema(conn *dbconn.DBConn, schema string, expectedNumTables int) {
   234  	countQuery := fmt.Sprintf(`SELECT count(*) AS string FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('S','v','r','p') AND n.nspname = '%s'`, schema)
   235  	actualTableCount := dbconn.MustSelectString(conn, countQuery)
   236  	if strconv.Itoa(expectedNumTables) != actualTableCount {
   237  		Fail(fmt.Sprintf("Expected:\n\t%s relations to have been created\nActual:\n\t%s relations were created", strconv.Itoa(expectedNumTables), actualTableCount))
   238  	}
   239  }
   240  
   241  func assertRelationsExistForIncremental(conn *dbconn.DBConn, expectedNumTables int) {
   242  	countQuery := `SELECT count(*) AS string FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('S','v','r','p') AND n.nspname IN ('old_schema', 'new_schema');`
   243  	actualTableCount := dbconn.MustSelectString(conn, countQuery)
   244  	if strconv.Itoa(expectedNumTables) != actualTableCount {
   245  		Fail(fmt.Sprintf("Expected:\n\t%s relations to exist in old_schema and new_schema\nActual:\n\t%s relations are present", strconv.Itoa(expectedNumTables), actualTableCount))
   246  	}
   247  }
   248  
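         // assertArtifactsCleaned verifies that no gpbackup_helper processes remain for
         // the given timestamp and that the helper pipe, oid, script, and error files
         // have been removed from every segment and the coordinator.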
   249  func assertArtifactsCleaned(conn *dbconn.DBConn, timestamp string) {
    250  	cmdStr := fmt.Sprintf("ps -ef | grep -v grep | grep -E gpbackup_helper.*%s || true", timestamp)
    251  	// Run the check inside the polling closure so Eventually re-executes it on each
         	// attempt; capturing the output once up front would retry a value that can never change.
    252  	Eventually(func() string {
         		output := mustRunCommand(exec.Command("bash", "-c", cmdStr))
         		return strings.TrimSpace(string(output))
         	}, 10*time.Second, 100*time.Millisecond).Should(Equal(""))
   253  
   254  	fpInfo := filepath.NewFilePathInfo(backupCluster, "", timestamp, filepath.GetSegPrefix(conn))
   255  	description := "Checking if helper files are cleaned up properly"
   256  	cleanupFunc := func(contentID int) string {
   257  		errorFile := fmt.Sprintf("%s_error", fpInfo.GetSegmentPipeFilePath(contentID))
   258  		oidFile := fpInfo.GetSegmentHelperFilePath(contentID, "oid")
   259  		scriptFile := fpInfo.GetSegmentHelperFilePath(contentID, "script")
   260  		pipeFile := fpInfo.GetSegmentPipeFilePath(contentID)
   261  
   262  		return fmt.Sprintf("! ls %s && ! ls %s && ! ls %s && ! ls %s*", errorFile, oidFile, scriptFile, pipeFile)
   263  	}
   264  	remoteOutput := backupCluster.GenerateAndExecuteCommand(description, cluster.ON_SEGMENTS|cluster.INCLUDE_COORDINATOR, cleanupFunc)
   265  	if remoteOutput.NumErrors != 0 {
   266  		Fail(fmt.Sprintf("Helper files found for timestamp %s", timestamp))
   267  	}
   268  }
   269  
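         // mustRunCommand runs the command and fails the spec on error, also setting
         // testFailure so that AfterSuite skips its cleanup.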
   270  func mustRunCommand(cmd *exec.Cmd) []byte {
   271  	output, err := cmd.CombinedOutput()
   272  	if err != nil {
   273  		testFailure = true
   274  		fmt.Printf("%s", output)
   275  		Fail(fmt.Sprintf("%v", err))
   276  	}
   277  	return output
   278  }
   279  
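         // skipIfOldBackupVersionBefore skips the current spec when testing against an
         // older gpbackup whose version predates the given feature version.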
   280  func skipIfOldBackupVersionBefore(version string) {
   281  	if useOldBackupVersion && oldBackupSemVer.LT(semver.MustParse(version)) {
   282  		Skip(fmt.Sprintf("Feature not supported in gpbackup %s", oldBackupSemVer))
   283  	}
   284  }
   285  
   286  func createGlobalObjects(conn *dbconn.DBConn) {
   287  	testhelper.AssertQueryRuns(conn, "CREATE TABLESPACE test_tablespace LOCATION '/tmp/test_dir';")
   288  	testhelper.AssertQueryRuns(conn, "CREATE RESOURCE QUEUE test_queue WITH (ACTIVE_STATEMENTS=5);")
   289  	testhelper.AssertQueryRuns(conn, "CREATE ROLE global_role RESOURCE QUEUE test_queue;")
   290  	testhelper.AssertQueryRuns(conn, "CREATE ROLE testrole;")
   291  	testhelper.AssertQueryRuns(conn, "GRANT testrole TO global_role;")
   292  	testhelper.AssertQueryRuns(conn, "CREATE DATABASE global_db TABLESPACE test_tablespace;")
   293  	testhelper.AssertQueryRuns(conn, "ALTER DATABASE global_db OWNER TO global_role;")
   294  	testhelper.AssertQueryRuns(conn, "ALTER ROLE global_role SET search_path TO public,pg_catalog;")
   295  	testhelper.AssertQueryRuns(conn, "CREATE RESOURCE GROUP test_group WITH (CPU_RATE_LIMIT=1, MEMORY_LIMIT=1);")
   296  	testhelper.AssertQueryRuns(conn, "ALTER ROLE global_role RESOURCE GROUP test_group;")
   297  }
   298  
   299  func dropGlobalObjects(conn *dbconn.DBConn, dbExists bool) {
   300  	if dbExists {
   301  		testhelper.AssertQueryRuns(conn, "DROP DATABASE global_db;")
   302  	}
   303  	testhelper.AssertQueryRuns(conn, "DROP TABLESPACE test_tablespace;")
   304  	testhelper.AssertQueryRuns(conn, "DROP ROLE global_role;")
   305  	testhelper.AssertQueryRuns(conn, "DROP ROLE testrole;")
   306  	testhelper.AssertQueryRuns(conn, "DROP RESOURCE QUEUE test_queue;")
   307  	testhelper.AssertQueryRuns(conn, "DROP RESOURCE GROUP test_group;")
   308  }
   309  
   310  // fileSuffix should be one of: config.yaml, metadata.sql, toc.yaml, or report
   311  func getMetdataFileContents(backupDir string, timestamp string, fileSuffix string) []byte {
   312  	file, err := path.Glob(path.Join(backupDir, "*-1/backups", timestamp[:8], timestamp, fmt.Sprintf("gpbackup_%s_%s", timestamp, fileSuffix)))
   313  	Expect(err).ToNot(HaveOccurred())
   314  	fileContentBytes, err := ioutil.ReadFile(file[0])
   315  	Expect(err).ToNot(HaveOccurred())
   316  
   317  	return fileContentBytes
   318  }
   319  
   320  func saveHistory(myCluster *cluster.Cluster) {
    321  	// Move the history file out of the way, and restore it in AfterSuite. This is
    322  	// because the history file might have newer backups, with more attributes, and
    323  	// thus the newer history could be a longer file than when read and rewritten by
    324  	// the old history code (the history code reads in the history, inserts a new
    325  	// config at the top, and writes the entire file). We have known bugs in the
    326  	// underlying common library about closing a file after reading, and also a bug
    327  	// with not using O_TRUNC when opening a file for writing.
   328  
   329  	mdd := myCluster.GetDirForContent(-1)
   330  	historyFilePath = path.Join(mdd, "gpbackup_history.yaml")
   331  	_ = utils.CopyFile(historyFilePath, saveHistoryFilePath)
   332  }
   333  
   334  // Parse backup timestamp from gpbackup log string
   335  func getBackupTimestamp(output string) (string, error) {
   336  	r, err := regexp.Compile(`Backup Timestamp = (\d{14})`)
   337  	if err != nil {
   338  		return "", err
   339  	}
   340  	matches := r.FindStringSubmatch(output)
   341  	if len(matches) < 2 {
   342  		return "", errors.Errorf("unable to parse backup timestamp")
    343  	}
    344  
    345  	return matches[1], nil
    346  }
   347  
   348  func TestEndToEnd(t *testing.T) {
   349  	format.MaxLength = 0
   350  	RegisterFailHandler(Fail)
   351  	RunSpecs(t, "EndToEnd Suite")
   352  }
   353  
   354  var _ = BeforeSuite(func() {
   355  	// This is used to run tests from an older gpbackup version to gprestore latest
   356  	useOldBackupVersion = os.Getenv("OLD_BACKUP_VERSION") != ""
   357  	pluginConfigPath =
   358  		fmt.Sprintf("%s/src/github.com/tuhaihe/gpbackup/plugins/example_plugin_config.yaml",
   359  			os.Getenv("GOPATH"))
   360  	var err error
   361  	testhelper.SetupTestLogger()
   362  	_ = exec.Command("dropdb", "testdb").Run()
   363  	_ = exec.Command("dropdb", "restoredb").Run()
   364  	_ = exec.Command("psql", "postgres",
   365  		"-c", "DROP RESOURCE QUEUE test_queue").Run()
   366  
   367  	err = exec.Command("createdb", "testdb").Run()
   368  	if err != nil {
   369  		Fail(fmt.Sprintf("Could not create testdb: %v", err))
   370  	}
   371  	err = exec.Command("createdb", "restoredb").Run()
   372  	if err != nil {
   373  		Fail(fmt.Sprintf("Could not create restoredb: %v", err))
   374  	}
   375  	backupConn = testutils.SetupTestDbConn("testdb")
   376  	restoreConn = testutils.SetupTestDbConn("restoredb")
   377  	backupCmdFlags := pflag.NewFlagSet("gpbackup", pflag.ExitOnError)
   378  	backup.SetCmdFlags(backupCmdFlags)
   379  	backup.InitializeMetadataParams(backupConn)
   380  	backup.SetFilterRelationClause("")
   381  	testutils.ExecuteSQLFile(backupConn, "resources/test_tables_ddl.sql")
   382  	testutils.ExecuteSQLFile(backupConn, "resources/test_tables_data.sql")
   383  
   384  	// default GUC setting varies between versions so set it explicitly
   385  	testhelper.AssertQueryRuns(backupConn, "SET gp_autostats_mode='on_no_stats'")
   386  
   387  	if useOldBackupVersion {
   388  		oldBackupSemVer = semver.MustParse(os.Getenv("OLD_BACKUP_VERSION"))
   389  		oldBackupVersionStr := os.Getenv("OLD_BACKUP_VERSION")
   390  
   391  		_, restoreHelperPath, gprestorePath = buildAndInstallBinaries()
   392  
   393  		// Precompiled binaries will exist when running the ci job, `backward-compatibility`
   394  		if _, err := os.Stat(fmt.Sprintf("/tmp/%s", oldBackupVersionStr)); err == nil {
   395  			gpbackupPath = path.Join("/tmp", oldBackupVersionStr, "gpbackup")
   396  			backupHelperPath = path.Join("/tmp", oldBackupVersionStr, "gpbackup_helper")
   397  		} else {
   398  			gpbackupPath, backupHelperPath = buildOldBinaries(oldBackupVersionStr)
   399  		}
   400  	} else {
   401  		// Check if gpbackup binary has been installed using gppkg
   402  		gpHomeDir := operating.System.Getenv("GPHOME")
   403  		binDir := fmt.Sprintf("%s/go/bin", operating.System.Getenv("HOME"))
   404  		if _, err := os.Stat(fmt.Sprintf("%s/bin/gpbackup", gpHomeDir)); err == nil {
   405  			binDir = fmt.Sprintf("%s/bin", gpHomeDir)
   406  		}
   407  
   408  		gpbackupPath = fmt.Sprintf("%s/gpbackup", binDir)
   409  		gprestorePath = fmt.Sprintf("%s/gprestore", binDir)
   410  		backupHelperPath = fmt.Sprintf("%s/gpbackup_helper", binDir)
   411  		restoreHelperPath = backupHelperPath
   412  	}
   413  	segConfig := cluster.MustGetSegmentConfiguration(backupConn)
   414  	backupCluster = cluster.NewCluster(segConfig)
   415  
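         	// The filespace setup branch below is disabled (note the constant false
         	// condition); the tablespace test directories are created directly instead.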
   416  	if false {
   417  		testutils.SetupTestFilespace(backupConn, backupCluster)
   418  	} else {
   419  		remoteOutput := backupCluster.GenerateAndExecuteCommand(
   420  			"Creating filespace test directories on all hosts",
   421  			cluster.ON_HOSTS|cluster.INCLUDE_COORDINATOR,
   422  			func(contentID int) string {
    423  				return "mkdir -p /tmp/test_dir && mkdir -p /tmp/test_dir1 && mkdir -p /tmp/test_dir2"
   424  			})
   425  		if remoteOutput.NumErrors != 0 {
    426  			Fail("Could not create filespace test directories on one or more hosts")
   427  		}
   428  	}
   429  
   430  	saveHistory(backupCluster)
   431  
   432  	err = os.MkdirAll(customBackupDir, 0777)
   433  	if err != nil {
   434  		Fail(fmt.Sprintf("Failed to create directory: %s. Error: %s", customBackupDir, err.Error()))
   435  	}
   436  	// Flag validation
   437  	_, err = os.Stat(customBackupDir)
   438  	if os.IsNotExist(err) {
   439  		Fail(fmt.Sprintf("Custom backup directory %s does not exist.", customBackupDir))
   440  	}
    441  	// Capture the cluster size for resize tests (minus one for the coordinator)
    442  	segmentCount = len(backupCluster.Segments) - 1
   443  
   444  })
   445  
   446  var _ = AfterSuite(func() {
   447  	if testFailure {
   448  		return
   449  	}
   450  	_ = utils.CopyFile(saveHistoryFilePath, historyFilePath)
   451  
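         	// As in BeforeSuite, the filespace teardown branch is disabled; drop the
         	// tablespace and remove the test directories directly instead.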
   452  	if false {
   453  		testutils.DestroyTestFilespace(backupConn)
   454  	} else {
   455  		_ = exec.Command("psql", "postgres",
   456  			"-c", "DROP RESOURCE QUEUE test_queue").Run()
   457  		_ = exec.Command("psql", "postgres",
   458  			"-c", "DROP TABLESPACE test_tablespace").Run()
   459  		remoteOutput := backupCluster.GenerateAndExecuteCommand(
   460  			"Removing /tmp/test_dir* directories on all hosts",
   461  			cluster.ON_HOSTS|cluster.INCLUDE_COORDINATOR,
   462  			func(contentID int) string {
    463  				return "rm -rf /tmp/test_dir*"
   464  			})
   465  		if remoteOutput.NumErrors != 0 {
    466  			Fail("Could not remove /tmp/test_dir* directories on one or more hosts")
   467  		}
   468  	}
   469  	if backupConn != nil {
   470  		backupConn.Close()
   471  	}
   472  	if restoreConn != nil {
   473  		restoreConn.Close()
   474  	}
   475  	CleanupBuildArtifacts()
   476  	err := exec.Command("dropdb", "testdb").Run()
   477  	if err != nil {
   478  		fmt.Printf("Could not drop testdb: %v\n", err)
   479  	}
   480  	err = exec.Command("dropdb", "restoredb").Run()
   481  	if err != nil {
   482  		fmt.Printf("Could not drop restoredb: %v\n", err)
   483  	}
   484  })
   485  
   486  func end_to_end_setup() {
   487  	testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA IF EXISTS schema2 CASCADE; DROP SCHEMA public CASCADE; CREATE SCHEMA public;")
   488  	publicSchemaTupleCounts = map[string]int{
   489  		"public.foo":   40000,
   490  		"public.holds": 50000,
   491  		"public.sales": 13,
   492  	}
   493  	schema2TupleCounts = map[string]int{
   494  		"schema2.returns": 6,
   495  		"schema2.foo2":    0,
   496  		"schema2.foo3":    100,
   497  		"schema2.ao1":     1000,
   498  		"schema2.ao2":     1000,
   499  	}
   500  
    501  	// Note that BeforeSuite has saved off the history file, in case we are running
    502  	// on a workstation where we want to retain the normal (non-test) history.
    503  	// We remove it here to work around old common-library bugs in closing a
    504  	// file after writing and in truncating when opening a file for writing, both
    505  	// of which manifest as a broken history file in old code.
   506  	_ = os.Remove(historyFilePath)
   507  
   508  	// Assign a unique directory for each test
   509  	backupDir, _ = ioutil.TempDir(customBackupDir, "temp")
   510  }
   511  
   512  func end_to_end_teardown() {
   513  	_ = os.RemoveAll(backupDir)
   514  }
   515  
   516  var _ = Describe("backup and restore end to end tests", func() {
   517  	BeforeEach(func() {
   518  		end_to_end_setup()
   519  	})
   520  	AfterEach(func() {
   521  		end_to_end_teardown()
   522  	})
   523  
   524  	Describe("globals tests", func() {
   525  		It("runs gpbackup and gprestore with --with-globals", func() {
   526  			Skip("Cloudberry skip")
   527  			skipIfOldBackupVersionBefore("1.8.2")
   528  			createGlobalObjects(backupConn)
   529  
   530  			timestamp := gpbackup(gpbackupPath, backupHelperPath)
   531  
   532  			dropGlobalObjects(backupConn, true)
   533  			defer dropGlobalObjects(backupConn, false)
   534  
   535  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   536  				"--redirect-db", "restoredb",
   537  				"--with-globals")
   538  		})
   539  		It("runs gpbackup and gprestore with --with-globals and --create-db", func() {
   540  			Skip("Cloudberry skip")
   541  			skipIfOldBackupVersionBefore("1.8.2")
   542  			createGlobalObjects(backupConn)
   543  			testhelper.AssertQueryRuns(backupConn,
   544  					"ALTER ROLE global_role IN DATABASE global_db SET search_path TO public,pg_catalog;")
   545  
   546  			timestamp := gpbackup(gpbackupPath, backupHelperPath)
   547  			dropGlobalObjects(backupConn, true)
   548  			defer dropGlobalObjects(backupConn, true)
   549  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   550  				"--redirect-db", "global_db",
   551  				"--with-globals",
   552  				"--create-db")
   553  		})
   554  		It("runs gpbackup with --without-globals", func() {
   555  			Skip("Cloudberry skip")
   556  			skipIfOldBackupVersionBefore("1.18.0")
   557  			createGlobalObjects(backupConn)
   558  			defer dropGlobalObjects(backupConn, true)
   559  
   560  			timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--without-globals")
   561  
   562  			configFileContents := getMetdataFileContents(backupDir, timestamp, "config.yaml")
   563  			Expect(string(configFileContents)).To(ContainSubstring("withoutglobals: true"))
   564  
   565  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
   566  			Expect(string(metadataFileContents)).ToNot(ContainSubstring("CREATE ROLE testrole"))
   567  
   568  			tocFileContents := getMetdataFileContents(backupDir, timestamp, "toc.yaml")
   569  			tocStruct := &toc.TOC{}
   570  			err := yaml.Unmarshal(tocFileContents, tocStruct)
   571  			Expect(err).ToNot(HaveOccurred())
   572  			Expect(len(tocStruct.GlobalEntries)).To(Equal(1))
   573  			Expect(tocStruct.GlobalEntries[0].ObjectType).To(Equal("SESSION GUCS"))
   574  		})
   575  		It("runs gpbackup with --without-globals and --metadata-only", func() {
   576  			Skip("Cloudberry skip")
   577  			skipIfOldBackupVersionBefore("1.18.0")
   578  			createGlobalObjects(backupConn)
   579  			defer dropGlobalObjects(backupConn, true)
   580  
   581  			timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--without-globals", "--metadata-only")
   582  
   583  			configFileContents := getMetdataFileContents(backupDir, timestamp, "config.yaml")
   584  			Expect(string(configFileContents)).To(ContainSubstring("withoutglobals: true"))
   585  
   586  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
   587  			Expect(string(metadataFileContents)).ToNot(ContainSubstring("CREATE ROLE testrole"))
   588  
   589  			tocFileContents := getMetdataFileContents(backupDir, timestamp, "toc.yaml")
   590  			tocStruct := &toc.TOC{}
   591  			err := yaml.Unmarshal(tocFileContents, tocStruct)
   592  			Expect(err).ToNot(HaveOccurred())
   593  			Expect(len(tocStruct.GlobalEntries)).To(Equal(1))
   594  			Expect(tocStruct.GlobalEntries[0].ObjectType).To(Equal("SESSION GUCS"))
   595  		})
   596  	})
   597  	Describe(`On Error Continue`, func() {
   598  		It(`gprestore continues when encountering errors during data load with --single-data-file and --on-error-continue`, func() {
   599  			Skip("Cloudberry skip")
   600  			if segmentCount != 3 {
   601  				Skip("Restoring from a tarred backup currently requires a 3-segment cluster to test.")
   602  			}
   603  
   604  			// This backup is corrupt because the data for a single row on
   605  			// segment0 was changed so that the value stored in the row is
   606  			// 9 instead of 1.  This will cause an issue when COPY FROM
   607  			// attempts to restore this data because it will error out
   608  			// stating it belongs to a different segment. This backup was
   609  			// taken with gpbackup version 1.12.1 and GPDB version 4.3.33.2
   610  
   611  			command := exec.Command("tar", "-xzf", "resources/corrupt-db.tar.gz", "-C", backupDir)
   612  			mustRunCommand(command)
   613  
   614  			gprestoreCmd := exec.Command(gprestorePath,
   615  				"--timestamp", "20190809230424",
   616  				"--redirect-db", "restoredb",
   617  				"--backup-dir", path.Join(backupDir, "corrupt-db"),
   618  				"--on-error-continue")
   619  			_, err := gprestoreCmd.CombinedOutput()
   620  			Expect(err).To(HaveOccurred())
   621  
   622  			assertRelationsCreated(restoreConn, 3)
    623  			// Expect corrupt_table to have 0 tuples because the data load should have failed due to a violation of the distribution key constraint.
   624  			assertDataRestored(restoreConn, map[string]int{
   625  				"public.corrupt_table": 0,
   626  				"public.good_table1":   10,
   627  				"public.good_table2":   10})
   628  		})
   629  		It(`Creates skip file on segments for corrupted table for helpers to discover the file and skip it with --single-data-file and --on-error-continue`, func() {
   630  			Skip("Cloudberry skip")
   631  			if segmentCount != 3 {
   632  				Skip("Restoring from a tarred backup currently requires a 3-segment cluster to test.")
   633  			}
   634  
   635  			command := exec.Command("tar", "-xzf", "resources/corrupt-db.tar.gz", "-C", backupDir)
   636  			mustRunCommand(command)
   637  
   638  			testhelper.AssertQueryRuns(restoreConn,
   639  				"CREATE TABLE public.corrupt_table (i integer);")
   640  			defer testhelper.AssertQueryRuns(restoreConn,
   641  				"DROP TABLE public.corrupt_table")
   642  
    643  			// We know that the broken value goes to seg2, so seg1 should be
    644  			// fine. Connect to seg1 in utility mode.
   645  			segmentOne := backupCluster.ByContent[1]
   646  			port := segmentOne[0].Port
   647  			segConn := testutils.SetupTestDBConnSegment("restoredb", port, backupConn.Version)
   648  			defer segConn.Close()
   649  
   650  			// Take ACCESS EXCLUSIVE LOCK on public.corrupt_table which will
   651  			// make COPY on seg1 block until the lock is released. By that
   652  			// time, COPY on seg2 will fail and gprestore will create a skip
   653  			// file for public.corrupt_table. When the lock is released on seg1,
   654  			// the restore helper should discover the file and skip the table.
   655  			segConn.Begin(0)
   656  			segConn.Exec("LOCK TABLE public.corrupt_table IN ACCESS EXCLUSIVE MODE;")
   657  
   658  			gprestoreCmd := exec.Command(gprestorePath,
   659  				"--timestamp", "20190809230424",
   660  				"--redirect-db", "restoredb",
   661  				"--backup-dir", path.Join(backupDir, "corrupt-db"),
   662  				"--data-only", "--on-error-continue",
   663  				"--include-table", "public.corrupt_table")
   664  			_, err := gprestoreCmd.CombinedOutput()
   665  			Expect(err).To(HaveOccurred())
   666  
   667  			segConn.Commit(0)
   668  			homeDir := os.Getenv("HOME")
   669  			helperLogs, _ := path.Glob(path.Join(homeDir, "gpAdminLogs/gpbackup_helper*"))
   670  			cmdStr := fmt.Sprintf("tail -n 40 %s | grep \"Skip file has been discovered for entry\" || true", helperLogs[len(helperLogs)-1])
   671  
    672  			attempts := 1000
    673  			err = errors.New("timed out waiting for skip file to be discovered")
    674  			for attempts > 0 {
    675  				output := mustRunCommand(exec.Command("bash", "-c", cmdStr))
    676  				if strings.TrimSpace(string(output)) == "" {
    677  					time.Sleep(5 * time.Millisecond)
    678  					attempts--
   679  				} else {
   680  					err = nil
   681  					break
   682  				}
   683  			}
   684  			Expect(err).NotTo(HaveOccurred())
   685  		})
   686  		It(`ensure gprestore on corrupt backup with --on-error-continue logs error tables`, func() {
   687  			Skip("Cloudberry skip")
   688  			if segmentCount != 3 {
   689  				Skip("Restoring from a tarred backup currently requires a 3-segment cluster to test.")
   690  			}
   691  			command := exec.Command("tar", "-xzf",
   692  				"resources/corrupt-db.tar.gz", "-C", backupDir)
   693  			mustRunCommand(command)
   694  
   695  			// Restore command with data error
   696  			// Metadata errors due to invalid alter ownership
   697  			expectedErrorTablesData := []string{"public.corrupt_table"}
   698  			expectedErrorTablesMetadata := []string{
   699  				"public.corrupt_table", "public.good_table1", "public.good_table2"}
   700  			gprestoreCmd := exec.Command(gprestorePath,
   701  				"--timestamp", "20190809230424",
   702  				"--redirect-db", "restoredb",
   703  				"--backup-dir", path.Join(backupDir, "corrupt-db"),
   704  				"--on-error-continue")
   705  			_, _ = gprestoreCmd.CombinedOutput()
   706  
   707  			files, _ := path.Glob(path.Join(backupDir, "/corrupt-db/", "*-1/backups/*",
   708  				"20190809230424", "*error_tables*"))
   709  			Expect(files).To(HaveLen(2))
   710  
   711  			Expect(files[0]).To(HaveSuffix("_data"))
   712  			contents, err := ioutil.ReadFile(files[0])
   713  			Expect(err).ToNot(HaveOccurred())
   714  			tables := strings.Split(string(contents), "\n")
   715  			Expect(tables).To(Equal(expectedErrorTablesData))
   716  			_ = os.Remove(files[0])
   717  
   718  			Expect(files).To(HaveLen(2))
   719  			Expect(files[1]).To(HaveSuffix("_metadata"))
   720  			contents, err = ioutil.ReadFile(files[1])
   721  			Expect(err).ToNot(HaveOccurred())
   722  			tables = strings.Split(string(contents), "\n")
   723  			sort.Strings(tables)
   724  			Expect(tables).To(Equal(expectedErrorTablesMetadata))
   725  			_ = os.Remove(files[1])
   726  
   727  			// Restore command with tables containing multiple metadata errors
   728  			// This test is to ensure we don't have tables with multiple errors show up twice
   729  			gprestoreCmd = exec.Command(gprestorePath,
   730  				"--timestamp", "20190809230424",
   731  				"--redirect-db", "restoredb",
   732  				"--backup-dir", path.Join(backupDir, "corrupt-db"),
   733  				"--metadata-only",
   734  				"--on-error-continue")
   735  			_, _ = gprestoreCmd.CombinedOutput()
   736  			expectedErrorTablesMetadata = []string{
   737  				"public.corrupt_table", "public.good_table1", "public.good_table2"}
   738  			files, _ = path.Glob(path.Join(backupDir, "/corrupt-db/",
   739  				"*-1/backups/*", "20190809230424", "*error_tables*"))
   740  			Expect(files).To(HaveLen(1))
   741  			Expect(files[0]).To(HaveSuffix("_metadata"))
   742  			contents, err = ioutil.ReadFile(files[0])
   743  			Expect(err).ToNot(HaveOccurred())
   744  			tables = strings.Split(string(contents), "\n")
   745  			sort.Strings(tables)
   746  			Expect(tables).To(HaveLen(len(expectedErrorTablesMetadata)))
   747  			_ = os.Remove(files[0])
   748  		})
   749  		It(`ensure successful gprestore with --on-error-continue does not log error tables`, func() {
   750  			// Ensure no error tables with successful restore
   751  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
   752  				"--no-compression",
   753  				"--backup-dir", backupDir)
   754  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   755  				"--redirect-db", "restoredb",
   756  				"--backup-dir", backupDir,
   757  				"--on-error-continue")
   758  			files, err := path.Glob(path.Join(backupDir, "*-1/backups/*", timestamp, "_error_tables*"))
   759  			Expect(err).ToNot(HaveOccurred())
   760  			Expect(files).To(HaveLen(0))
   761  		})
   762  	})
   763  	Describe("Redirect Schema", func() {
   764  		It("runs gprestore with --redirect-schema restoring data and statistics to the new schema", func() {
   765  			skipIfOldBackupVersionBefore("1.17.0")
   766  			testhelper.AssertQueryRuns(restoreConn,
   767  				"DROP SCHEMA IF EXISTS schema3 CASCADE; CREATE SCHEMA schema3;")
   768  			defer testhelper.AssertQueryRuns(restoreConn,
   769  				"DROP SCHEMA schema3 CASCADE")
   770  			testhelper.AssertQueryRuns(backupConn,
   771  				"CREATE INDEX foo3_idx1 ON schema2.foo3(i)")
   772  			defer testhelper.AssertQueryRuns(backupConn,
   773  				"DROP INDEX schema2.foo3_idx1")
   774  			testhelper.AssertQueryRuns(backupConn,
   775  				"ANALYZE schema2.foo3")
   776  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
   777  				"--with-stats")
   778  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   779  				"--redirect-db", "restoredb",
   780  				"--include-table", "schema2.foo3",
   781  				"--redirect-schema", "schema3",
   782  				"--with-stats")
   783  
   784  			schema3TupleCounts := map[string]int{
   785  				"schema3.foo3": 100,
   786  			}
   787  			assertDataRestored(restoreConn, schema3TupleCounts)
   788  			assertPGClassStatsRestored(restoreConn, restoreConn, schema3TupleCounts)
   789  
   790  			actualIndexCount := dbconn.MustSelectString(restoreConn,
   791  				`SELECT count(*) AS string FROM pg_indexes WHERE schemaname='schema3' AND indexname='foo3_idx1';`)
   792  			Expect(actualIndexCount).To(Equal("1"))
   793  
   794  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
   795  				`SELECT count(*) FROM pg_statistic WHERE starelid='schema3.foo3'::regclass::oid;`)
   796  			Expect(actualStatisticCount).To(Equal("1"))
   797  		})
   798  		It("runs gprestore with --redirect-schema to redirect data back to the original database which still contain the original tables", func() {
   799  			skipIfOldBackupVersionBefore("1.17.0")
   800  			testhelper.AssertQueryRuns(backupConn,
   801  				"DROP SCHEMA IF EXISTS schema3 CASCADE; CREATE SCHEMA schema3;")
   802  			defer testhelper.AssertQueryRuns(backupConn,
   803  				"DROP SCHEMA schema3 CASCADE")
   804  			testhelper.AssertQueryRuns(backupConn,
   805  				"CREATE INDEX foo3_idx1 ON schema2.foo3(i)")
   806  			defer testhelper.AssertQueryRuns(backupConn,
   807  				"DROP INDEX schema2.foo3_idx1")
   808  			testhelper.AssertQueryRuns(backupConn,
   809  				"ANALYZE schema2.foo3")
   810  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
   811  				"--with-stats")
   812  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   813  				"--include-table", "schema2.foo3",
   814  				"--redirect-schema", "schema3",
   815  				"--with-stats")
   816  
   817  			schema3TupleCounts := map[string]int{
   818  				"schema3.foo3": 100,
   819  			}
   820  			assertDataRestored(backupConn, schema3TupleCounts)
   821  			assertPGClassStatsRestored(backupConn, backupConn, schema3TupleCounts)
   822  
   823  			actualIndexCount := dbconn.MustSelectString(backupConn,
   824  				`SELECT count(*) AS string FROM pg_indexes WHERE schemaname='schema3' AND indexname='foo3_idx1';`)
   825  			Expect(actualIndexCount).To(Equal("1"))
   826  
   827  			actualStatisticCount := dbconn.MustSelectString(backupConn,
   828  				`SELECT count(*) FROM pg_statistic WHERE starelid='schema3.foo3'::regclass::oid;`)
   829  			Expect(actualStatisticCount).To(Equal("1"))
   830  		})
   831  		It("runs gprestore with --redirect-schema and multiple included schemas", func() {
   832  			skipIfOldBackupVersionBefore("1.17.0")
   833  			testhelper.AssertQueryRuns(restoreConn,
   834  				"DROP SCHEMA IF EXISTS schema3 CASCADE; CREATE SCHEMA schema3;")
   835  			defer testhelper.AssertQueryRuns(restoreConn,
   836  				"DROP SCHEMA schema3 CASCADE")
   837  			testhelper.AssertQueryRuns(backupConn,
   838  				"CREATE SCHEMA \"FOO\"")
   839  			defer testhelper.AssertQueryRuns(backupConn,
   840  				"DROP SCHEMA \"FOO\" CASCADE")
   841  			testhelper.AssertQueryRuns(backupConn,
   842  				"CREATE TABLE \"FOO\".bar(i int)")
   843  
   844  			tableFile := path.Join(backupDir, "test-table-file.txt")
   845  			includeFile := iohelper.MustOpenFileForWriting(tableFile)
    846  			utils.MustPrintln(includeFile,
    847  				"public.sales\nschema2.foo2\nschema2.ao1\nFOO.bar")
   850  			timestamp := gpbackup(gpbackupPath, backupHelperPath)
   851  
   852  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   853  				"--include-table-file", tableFile,
   854  				"--redirect-db", "restoredb",
   855  				"--redirect-schema", "schema3")
   856  
   857  			schema3TupleCounts := map[string]int{
   858  				"schema3.foo2":  0,
   859  				"schema3.ao1":   1000,
   860  				"schema3.sales": 13,
   861  				"schema3.bar":   0,
   862  			}
   863  			assertDataRestored(restoreConn, schema3TupleCounts)
   864  			assertRelationsCreatedInSchema(restoreConn, "schema2", 0)
   865  		})
    866  		It("runs --redirect-schema with --metadata-only", func() {
   867  			skipIfOldBackupVersionBefore("1.17.0")
   868  			testhelper.AssertQueryRuns(restoreConn,
   869  				"DROP SCHEMA IF EXISTS schema_to_redirect CASCADE; CREATE SCHEMA \"schema_to_redirect\";")
   870  			defer testhelper.AssertQueryRuns(restoreConn,
   871  				"DROP SCHEMA schema_to_redirect CASCADE")
   872  			testhelper.AssertQueryRuns(backupConn,
   873  				"CREATE SCHEMA schema_to_test")
   874  			defer testhelper.AssertQueryRuns(backupConn,
   875  				"DROP SCHEMA schema_to_test CASCADE")
   876  			testhelper.AssertQueryRuns(backupConn,
   877  				"CREATE TABLE schema_to_test.table_metadata_only AS SELECT generate_series(1,10)")
   878  			timestamp := gpbackup(gpbackupPath, backupHelperPath, "--metadata-only", "--include-schema", "schema_to_test")
   879  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   880  				"--redirect-db", "restoredb",
   881  				"--redirect-schema", "schema_to_redirect",
   882  				"--include-table", "schema_to_test.table_metadata_only",
   883  				"--metadata-only")
   884  			assertRelationsCreatedInSchema(restoreConn, "schema_to_redirect", 1)
   885  			assertDataRestored(restoreConn, map[string]int{"schema_to_redirect.table_metadata_only": 0})
   886  		})
   887  		It("runs --redirect-schema with --include-schema and --include-schema-file", func() {
   888  			Skip("Cloudberry skip")
   889  			skipIfOldBackupVersionBefore("1.17.0")
   890  			testhelper.AssertQueryRuns(restoreConn,
   891  				"DROP SCHEMA IF EXISTS schema3 CASCADE; CREATE SCHEMA schema3;")
   892  			defer testhelper.AssertQueryRuns(restoreConn,
   893  				"DROP SCHEMA schema3 CASCADE")
   894  			testhelper.AssertQueryRuns(backupConn,
   895  				"CREATE SCHEMA fooschema")
   896  			defer testhelper.AssertQueryRuns(backupConn,
   897  				"DROP SCHEMA fooschema CASCADE")
   898  			testhelper.AssertQueryRuns(backupConn,
   899  				"CREATE TABLE fooschema.redirected_table(i int)")
   900  
   901  			schemaFile := path.Join(backupDir, "test-schema-file.txt")
   902  			includeSchemaFd := iohelper.MustOpenFileForWriting(schemaFile)
   903  			utils.MustPrintln(includeSchemaFd, "fooschema")
   904  
   905  			timestamp := gpbackup(gpbackupPath, backupHelperPath)
   906  
   907  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   908  				"--include-schema-file", schemaFile,
   909  				"--include-schema", "schema2",
   910  				"--redirect-db", "restoredb",
   911  				"--redirect-schema", "schema3")
   912  
   913  			expectedSchema3TupleCounts := map[string]int{
   914  				"schema3.returns":          6,
   915  				"schema3.foo2":             0,
   916  				"schema3.foo3":             100,
   917  				"schema3.ao1":              1000,
   918  				"schema3.ao2":              1000,
   919  				"schema3.redirected_table": 0,
   920  			}
   921  			assertDataRestored(restoreConn, expectedSchema3TupleCounts)
   922  			assertRelationsCreatedInSchema(restoreConn, "public", 0)
   923  			assertRelationsCreatedInSchema(restoreConn, "schema2", 0)
   924  			assertRelationsCreatedInSchema(restoreConn, "fooschema", 0)
   925  		})
   926  	})
   927  	Describe("ACLs for extensions", func() {
   928  		It("runs gpbackup and gprestores any user defined ACLs on extensions", func() {
   929  			testutils.SkipIfBefore5(backupConn)
   930  			skipIfOldBackupVersionBefore("1.17.0")
   931  			currentUser := os.Getenv("USER")
   932  			testhelper.AssertQueryRuns(backupConn, "CREATE ROLE testrole")
   933  			defer testhelper.AssertQueryRuns(backupConn,
   934  				"DROP ROLE testrole")
   935  			testhelper.AssertQueryRuns(backupConn, "CREATE EXTENSION pgcrypto")
   936  			defer testhelper.AssertQueryRuns(backupConn,
   937  				"DROP EXTENSION pgcrypto")
   938  			// Create a grant on a function that belongs to the extension
   939  			testhelper.AssertQueryRuns(backupConn,
   940  				"GRANT EXECUTE ON FUNCTION gen_random_bytes(integer) to testrole WITH GRANT OPTION")
   941  
   942  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
   943  				"--metadata-only")
   944  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   945  				"--redirect-db", "restoredb")
   946  
   947  			extensionMetadata := backup.ObjectMetadata{
   948  				ObjectType: "FUNCTION", Privileges: []backup.ACL{
   949  					{Grantee: "", Execute: true},
   950  					{Grantee: currentUser, Execute: true},
   951  					{Grantee: "testrole", ExecuteWithGrant: true},
   952  				}, Owner: currentUser}
   953  
   954  			// Check for the corresponding grants in restored database
   955  			uniqueID := testutils.UniqueIDFromObjectName(restoreConn,
   956  				"public", "gen_random_bytes", backup.TYPE_FUNCTION)
   957  			resultMetadataMap := backup.GetMetadataForObjectType(restoreConn, backup.TYPE_FUNCTION)
   958  
   959  			Expect(resultMetadataMap).To(Not(BeEmpty()))
   960  			resultMetadata := resultMetadataMap[uniqueID]
   961  			match, err := structmatcher.MatchStruct(&extensionMetadata).Match(&resultMetadata)
   962  			Expect(err).To(Not(HaveOccurred()))
   963  			Expect(match).To(BeTrue())
   964  			// Following statement is needed in order to drop testrole
   965  			testhelper.AssertQueryRuns(restoreConn, "DROP EXTENSION pgcrypto")
   966  			assertArtifactsCleaned(restoreConn, timestamp)
   967  		})
   968  	})
   969  	Describe("Restore with truncate-table", func() {
   970  		It("runs gpbackup and gprestore with truncate-table and include-table flags", func() {
   971  			timestamp := gpbackup(gpbackupPath, backupHelperPath)
   972  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   973  				"--redirect-db", "restoredb",
   974  				"--include-table", "public.sales")
   975  			assertDataRestored(restoreConn, map[string]int{
   976  				"public.sales": 13})
   977  
   978  			testhelper.AssertQueryRuns(restoreConn,
   979  				"INSERT into sales values(1, '2017-01-01', 109.99)")
   980  			time.Sleep(1 * time.Second)
   981  
   982  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   983  				"--redirect-db", "restoredb",
   984  				"--include-table", "public.sales",
   985  				"--truncate-table", "--data-only")
   986  			assertDataRestored(restoreConn, map[string]int{
   987  				"public.sales": 13})
   988  		})
   989  		It("runs gpbackup and gprestore with truncate-table and include-table-file flags", func() {
   990  			includeFile := iohelper.MustOpenFileForWriting("/tmp/include-tables.txt")
   991  			utils.MustPrintln(includeFile, "public.sales")
   992  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
   993  				"--backup-dir", backupDir)
   994  			gprestore(gprestorePath, restoreHelperPath, timestamp,
   995  				"--redirect-db", "restoredb",
   996  				"--backup-dir", backupDir,
   997  				"--include-table-file", "/tmp/include-tables.txt")
   998  			assertDataRestored(restoreConn, map[string]int{
   999  				"public.sales": 13})
  1000  
  1001  			testhelper.AssertQueryRuns(restoreConn,
  1002  				"INSERT into sales values(1, '2017-01-01', 99.99)")
  1003  			time.Sleep(1 * time.Second)
  1004  
  1005  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1006  				"--redirect-db", "restoredb",
  1007  				"--backup-dir", backupDir,
  1008  				"--include-table-file", "/tmp/include-tables.txt",
  1009  				"--truncate-table", "--data-only")
  1010  			assertDataRestored(restoreConn, map[string]int{
  1011  				"public.sales": 13})
  1012  
  1013  			_ = os.Remove("/tmp/include-tables.txt")
  1014  		})
  1015  		It("runs gpbackup and gprestore with truncate-table flag against a leaf partition", func() {
  1016  			skipIfOldBackupVersionBefore("1.7.2")
  1017  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1018  				"--leaf-partition-data")
  1019  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1020  				"--redirect-db", "restoredb",
  1021  				"--include-table", "public.sales_1_prt_jan17")
  1022  
  1023  			testhelper.AssertQueryRuns(restoreConn,
  1024  				"INSERT into public.sales_1_prt_jan17 values(1, '2017-01-01', 99.99)")
  1025  			time.Sleep(1 * time.Second)
  1026  
  1027  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1028  				"--redirect-db", "restoredb",
  1029  				"--include-table", "public.sales_1_prt_jan17",
  1030  				"--truncate-table", "--data-only")
  1031  			assertDataRestored(restoreConn, map[string]int{
  1032  				"public.sales": 1, "public.sales_1_prt_jan17": 1})
  1033  		})
  1034  	})
  1035  	Describe("Restore with --run-analyze", func() {
  1036  		It("runs gprestore without --run-analyze", func() {
  1037  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1038  				"--include-table", "public.sales")
  1039  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1040  				"--redirect-db", "restoredb")
  1041  
  1042  			// Since --run-analyze was not used, there should be no statistics
  1043  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
  1044  				`SELECT count(*) FROM pg_statistic WHERE starelid='public.sales'::regclass::oid`)
  1045  			Expect(actualStatisticCount).To(Equal("0"))
  1046  		})
  1047  		It("runs gprestore with --run-analyze", func() {
  1048  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1049  				"--include-table", "public.sales")
  1050  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1051  				"--redirect-db", "restoredb",
  1052  				"--run-analyze")
  1053  
  1054  			// Since --run-analyze was used, there should be stats
  1055  			// for all 3 columns of the sales partition table
  1056  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
  1057  				`SELECT count(*) FROM pg_statistic WHERE starelid='public.sales'::regclass::oid`)
  1058  			Expect(actualStatisticCount).To(Equal("3"))
  1059  		})
  1060  		It("runs gprestore with --run-analyze and --redirect-schema", func() {
  1061  			skipIfOldBackupVersionBefore("1.17.0")
  1062  			testhelper.AssertQueryRuns(restoreConn, "CREATE SCHEMA fooschema")
  1063  			defer testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA fooschema CASCADE")
  1064  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1065  				"--include-table", "public.sales")
  1066  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1067  				"--redirect-db", "restoredb",
  1068  				"--include-table", "public.sales",
  1069  				"--redirect-schema", "fooschema",
  1070  				"--run-analyze")
  1071  
  1072  			// Since --run-analyze was used, there should be stats
  1073  			// for all 3 columns of the sales partition table.
  1074  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
  1075  				`SELECT count(*) FROM pg_statistic WHERE starelid='fooschema.sales'::regclass::oid`)
  1076  			Expect(actualStatisticCount).To(Equal("3"))
  1077  		})
  1078  		It("runs gpbackup with --leaf-partition-data and gprestore with --run-analyze", func() {
  1079  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1080  				"--include-table", "public.sales", "--leaf-partition-data")
  1081  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1082  				"--redirect-db", "restoredb",
  1083  				"--run-analyze")
  1084  
  1085  			// Since --run-analyze was used, there should be stats
  1086  			// for all 3 columns of the sales partition table. The
  1087  			// leaf partition stats should merge up to the root
  1088  			// partition.
  1089  			actualStatisticCount := dbconn.MustSelectString(restoreConn,
  1090  				`SELECT count(*) FROM pg_statistic WHERE starelid='public.sales'::regclass::oid`)
  1091  			Expect(actualStatisticCount).To(Equal("3"))
  1092  		})
  1093  	})
  1094  	Describe("Flag combinations", func() {
  1095  		It("runs gpbackup and gprestore without redirecting restore to another db", func() {
  1096  			err := exec.Command("createdb", "recreateme").Run()
  1097  			if err != nil {
  1098  				Fail(fmt.Sprintf("%v", err))
  1099  			}
  1100  
  1101  			// Specifying the recreateme database will override the default DB, testdb
  1102  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1103  				"--dbname", "recreateme")
  1104  
  1105  			err = exec.Command("dropdb", "recreateme").Run()
  1106  			if err != nil {
  1107  				Fail(fmt.Sprintf("%v", err))
  1108  			}
  1109  
  1110  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1111  				"--create-db")
  1112  			recreatemeConn := testutils.SetupTestDbConn("recreateme")
  1113  			recreatemeConn.Close()
  1114  
  1115  			err = exec.Command("dropdb", "recreateme").Run()
  1116  			if err != nil {
  1117  				Fail(fmt.Sprintf("%v", err))
  1118  			}
  1119  		})
  1120  		It("runs basic gpbackup and gprestore with metadata and data-only flags", func() {
  1121  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1122  				"--metadata-only")
  1123  			timestamp2 := gpbackup(gpbackupPath, backupHelperPath,
  1124  				"--data-only")
  1125  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1126  				"--redirect-db", "restoredb")
  1127  			assertDataRestored(restoreConn, map[string]int{
  1128  				"public.foo": 0, "schema2.foo3": 0})
  1129  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1130  			gprestore(gprestorePath, restoreHelperPath, timestamp2,
  1131  				"--redirect-db", "restoredb")
  1132  
  1133  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1134  			assertDataRestored(restoreConn, schema2TupleCounts)
  1135  		})
  1136  		It("runs gpbackup and gprestore with metadata-only backup flag", func() {
  1137  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1138  				"--metadata-only")
  1139  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1140  				"--redirect-db", "restoredb")
  1141  
  1142  			assertDataRestored(restoreConn, map[string]int{
  1143  				"public.foo": 0, "schema2.foo3": 0})
  1144  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1145  		})
  1146  		It("runs gpbackup and gprestore with data-only backup flag", func() {
  1147  			testutils.ExecuteSQLFile(restoreConn, "resources/test_tables_ddl.sql")
  1148  
  1149  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1150  				"--data-only")
  1151  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1152  				"--redirect-db", "restoredb")
  1153  
  1154  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1155  			assertDataRestored(restoreConn, schema2TupleCounts)
  1156  		})
  1157  		It("runs gpbackup and gprestore with the data-only restore flag", func() {
  1158  			testutils.ExecuteSQLFile(restoreConn, "resources/test_tables_ddl.sql")
  1159  			testhelper.AssertQueryRuns(backupConn, "SELECT pg_catalog.setval('public.myseq2', 8888, false)")
  1160  			defer testhelper.AssertQueryRuns(backupConn, "SELECT pg_catalog.setval('public.myseq2', 100, false)")
  1161  
  1162  			timestamp := gpbackup(gpbackupPath, backupHelperPath)
  1163  			output := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1164  				"--redirect-db", "restoredb",
  1165  				"--data-only")
  1166  
  1167  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1168  			assertDataRestored(restoreConn, schema2TupleCounts)
  1169  
  1170  			// Assert that sequence values have been properly
  1171  			// updated as part of special sequence handling during
  1172  			// gprestore --data-only calls
  1173  			restoreSequenceValue := dbconn.MustSelectString(restoreConn,
  1174  				`SELECT last_value FROM public.myseq2`)
  1175  			Expect(restoreSequenceValue).To(Equal("8888"))
  1176  			Expect(string(output)).To(ContainSubstring("Restoring sequence values"))
  1177  		})
  1178  		It("runs gpbackup and gprestore with the metadata-only restore flag", func() {
  1179  			timestamp := gpbackup(gpbackupPath, backupHelperPath)
  1180  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1181  				"--redirect-db", "restoredb",
  1182  				"--metadata-only")
  1183  
  1184  			assertDataRestored(restoreConn, map[string]int{
  1185  				"public.foo": 0, "schema2.foo3": 0})
  1186  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1187  		})
  1188  		It("runs gpbackup and gprestore with leaf-partition-data and backup-dir flags", func() {
  1189  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1190  				"--leaf-partition-data",
  1191  				"--backup-dir", backupDir)
  1192  			output := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1193  				"--redirect-db", "restoredb",
  1194  				"--backup-dir", backupDir)
  1195  			Expect(string(output)).To(ContainSubstring("table 31 of 31"))
  1196  
  1197  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1198  			assertDataRestored(restoreConn, schema2TupleCounts)
  1199  		})
  1200  		It("runs gpbackup and gprestore with no-compression flag", func() {
  1201  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1202  				"--no-compression",
  1203  				"--backup-dir", backupDir)
  1204  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1205  				"--redirect-db", "restoredb",
  1206  				"--backup-dir", backupDir)
  1207  			configFile, err := path.Glob(path.Join(backupDir, "*-1/backups/*",
  1208  				timestamp, "*config.yaml"))
  1209  			Expect(err).ToNot(HaveOccurred())
  1210  			Expect(configFile).To(HaveLen(1))
  1211  
  1212  			contents, err := ioutil.ReadFile(configFile[0])
  1213  			Expect(err).ToNot(HaveOccurred())
  1214  
  1215  			Expect(string(contents)).To(ContainSubstring("compressed: false"))
  1216  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1217  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1218  			assertDataRestored(restoreConn, schema2TupleCounts)
  1219  		})
  1220  		It("runs gpbackup and gprestore with with-stats flag", func() {
  1221  			// gpbackup before version 1.18.0 does not dump pg_class statistics correctly
  1222  			skipIfOldBackupVersionBefore("1.18.0")
  1223  
  1224  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1225  				"--with-stats",
  1226  				"--backup-dir", backupDir)
  1227  			files, err := path.Glob(path.Join(backupDir, "*-1/backups/*",
  1228  				timestamp, "*statistics.sql"))
  1229  			Expect(err).ToNot(HaveOccurred())
  1230  			Expect(files).To(HaveLen(1))
  1231  
  1232  			output := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1233  				"--redirect-db", "restoredb",
  1234  				"--with-stats",
  1235  				"--backup-dir", backupDir)
  1236  
  1237  			Expect(string(output)).To(ContainSubstring("Query planner statistics restore complete"))
  1238  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1239  			assertDataRestored(restoreConn, schema2TupleCounts)
  1240  			assertPGClassStatsRestored(backupConn, restoreConn, publicSchemaTupleCounts)
  1241  			assertPGClassStatsRestored(backupConn, restoreConn, schema2TupleCounts)
  1242  
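        			// backup.SchemaFilterClause("nm") presumably expands to a predicate on the
        			// aliased pg_namespace row (excluding system schemas and applying any schema
        			// include/exclude filters), so the same count query is valid against both
        			// the backup and restore databases.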
  1243  			statsQuery := fmt.Sprintf(`SELECT count(*) AS string FROM pg_statistic st left join pg_class cl on st.starelid = cl.oid left join pg_namespace nm on cl.relnamespace = nm.oid where %s;`, backup.SchemaFilterClause("nm"))
  1244  			backupStatisticCount := dbconn.MustSelectString(backupConn, statsQuery)
  1245  			restoredStatisticsCount := dbconn.MustSelectString(restoreConn, statsQuery)
  1246  
  1247  			Expect(backupStatisticCount).To(Equal(restoredStatisticsCount))
  1248  
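        			// Statistics are restored by replaying the dumped *statistics.sql file
        			// rather than by running ANALYZE, so none of the restored tables should
        			// show an ANALYZE action in pg_stat_last_operation.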
  1249  			restoredTablesAnalyzed := dbconn.MustSelectString(restoreConn,
  1250  				`SELECT count(*) FROM pg_stat_last_operation WHERE objid IN ('public.foo'::regclass::oid, 'public.holds'::regclass::oid, 'public.sales'::regclass::oid, 'schema2.returns'::regclass::oid, 'schema2.foo2'::regclass::oid, 'schema2.foo3'::regclass::oid, 'schema2.ao1'::regclass::oid, 'schema2.ao2'::regclass::oid) AND staactionname='ANALYZE';`)
  1251  			Expect(restoredTablesAnalyzed).To(Equal("0"))
  1252  		})
  1253  		It("restores statistics only for tables specified in --include-table flag when running gprestore with the with-stats flag", func() {
  1254  			// gpbackup before version 1.18.0 does not dump pg_class statistics correctly
  1255  			skipIfOldBackupVersionBefore("1.18.0")
  1256  
  1257  			testhelper.AssertQueryRuns(backupConn,
  1258  				"CREATE TABLE public.table_to_include_with_stats(i int)")
  1259  			testhelper.AssertQueryRuns(backupConn,
  1260  				"INSERT INTO public.table_to_include_with_stats SELECT generate_series(0,9);")
  1261  
  1262  			defer testhelper.AssertQueryRuns(backupConn,
  1263  				"DROP TABLE public.table_to_include_with_stats")
  1264  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1265  				"--with-stats",
  1266  				"--backup-dir", backupDir)
  1267  			statFiles, err := path.Glob(path.Join(backupDir, "*-1/backups/*",
  1268  				timestamp, "*statistics.sql"))
  1269  			Expect(err).ToNot(HaveOccurred())
  1270  			Expect(statFiles).To(HaveLen(1))
  1271  
  1272  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1273  				"--redirect-db", "restoredb",
  1274  				"--with-stats",
  1275  				"--backup-dir", backupDir,
  1276  				"--include-table", "public.table_to_include_with_stats")
  1277  
  1278  			includeTableTupleCounts := map[string]int{
  1279  				"public.table_to_include_with_stats": 10,
  1280  			}
  1281  			assertDataRestored(restoreConn, includeTableTupleCounts)
  1282  			assertPGClassStatsRestored(backupConn, restoreConn, includeTableTupleCounts)
  1283  
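        			// pg_statistic holds one row per analyzed column, and the included table
        			// has a single column, so exactly one row should have been restored for it.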
  1284  			rawCount := dbconn.MustSelectString(restoreConn,
  1285  				"SELECT count(*) FROM pg_statistic WHERE starelid = 'public.table_to_include_with_stats'::regclass::oid;")
  1286  			Expect(rawCount).To(Equal(strconv.Itoa(1)))
  1287  
  1288  			restoreTableCount := dbconn.MustSelectString(restoreConn,
  1289  				"SELECT count(*) FROM pg_class WHERE oid >= 16384 AND relnamespace in (SELECT oid from pg_namespace WHERE nspname in ('public', 'schema2'));")
  1290  			Expect(restoreTableCount).To(Equal(strconv.Itoa(1)))
  1291  		})
  1292  		It("runs gpbackup and gprestore with jobs flag", func() {
  1293  			skipIfOldBackupVersionBefore("1.3.0")
  1294  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1295  				"--backup-dir", backupDir,
  1296  				"--jobs", "4")
  1297  			output := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1298  				"--redirect-db", "restoredb",
  1299  				"--backup-dir", backupDir,
  1300  				"--jobs", "4",
  1301  				"--verbose")
  1302  
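        			// Even with parallel jobs, each table logs a "table N of M" progress line,
        			// so the output should contain the line for the final table restored.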
  1303  			expectedString := fmt.Sprintf("table %d of %d", TOTAL_CREATE_STATEMENTS, TOTAL_CREATE_STATEMENTS)
  1304  			Expect(string(output)).To(ContainSubstring(expectedString))
  1305  			assertRelationsCreated(restoreConn, TOTAL_RELATIONS)
  1306  			assertDataRestored(restoreConn, schema2TupleCounts)
  1307  			assertDataRestored(restoreConn, publicSchemaTupleCounts)
  1308  		})
  1309  		It("runs gpbackup with --version flag", func() {
  1310  			if useOldBackupVersion {
  1311  				Skip("This test is not needed for old backup versions")
  1312  			}
  1313  			command := exec.Command(gpbackupPath, "--version")
  1314  			output := mustRunCommand(command)
  1315  			Expect(string(output)).To(MatchRegexp(`gpbackup version \w+`))
  1316  		})
  1317  		It("runs gprestore with --version flag", func() {
  1318  			command := exec.Command(gprestorePath, "--version")
  1319  			output := mustRunCommand(command)
  1320  			Expect(string(output)).To(MatchRegexp(`gprestore version \w+`))
  1321  		})
  1322  		It("runs gprestore with --include-schema and --exclude-table flag", func() {
  1323  			Skip("Cloudberry skip")
  1324  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1325  				"--metadata-only")
  1326  			gprestore(gprestorePath, restoreHelperPath, timestamp,
  1327  				"--redirect-db", "restoredb",
  1328  				"--include-schema", "schema2",
  1329  				"--exclude-table", "schema2.returns",
  1330  				"--metadata-only")
  1331  			assertRelationsCreated(restoreConn, 4)
  1332  		})
  1333  		It("runs gprestore with jobs flag and postdata has metadata", func() {
  1334  			if true {
  1335  				Skip("This test is not needed for old backup versions")
  1336  			}
  1337  
  1338  			if false {
  1339  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLESPACE test_tablespace FILESPACE test_dir")
  1340  			} else {
  1341  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLESPACE test_tablespace LOCATION '/tmp/test_dir';")
  1342  			}
  1343  			defer testhelper.AssertQueryRuns(backupConn, "DROP TABLESPACE test_tablespace;")
  1344  
  1345  			// Store everything in this test schema for easy test cleanup.
  1346  			testhelper.AssertQueryRuns(backupConn, "CREATE SCHEMA postdata_metadata;")
  1347  			defer testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA postdata_metadata CASCADE;")
  1348  			defer testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA postdata_metadata CASCADE;")
  1349  
  1350  			// Create a table and indexes. Currently for indexes, there are 4 possible pieces
  1351  			// of metadata: TABLESPACE, CLUSTER, REPLICA IDENTITY, and COMMENT.
  1352  			testhelper.AssertQueryRuns(backupConn, "CREATE TABLE postdata_metadata.foobar (a int NOT NULL);")
  1353  			testhelper.AssertQueryRuns(backupConn, "CREATE INDEX fooidx1 ON postdata_metadata.foobar USING btree(a) TABLESPACE test_tablespace;")
  1354  			testhelper.AssertQueryRuns(backupConn, "CREATE INDEX fooidx2 ON postdata_metadata.foobar USING btree(a) TABLESPACE test_tablespace;")
  1355  			testhelper.AssertQueryRuns(backupConn, "CREATE UNIQUE INDEX fooidx3 ON postdata_metadata.foobar USING btree(a) TABLESPACE test_tablespace;")
  1356  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON INDEX postdata_metadata.fooidx1 IS 'hello';")
  1357  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON INDEX postdata_metadata.fooidx2 IS 'hello';")
  1358  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON INDEX postdata_metadata.fooidx3 IS 'hello';")
  1359  			testhelper.AssertQueryRuns(backupConn, "ALTER TABLE postdata_metadata.foobar CLUSTER ON fooidx3;")
  1360  			testhelper.AssertQueryRuns(backupConn, "ALTER TABLE postdata_metadata.foobar REPLICA IDENTITY USING INDEX fooidx3")
  1361  
  1362  			// Create a rule. Currently for rules, the only metadata is COMMENT.
  1363  			testhelper.AssertQueryRuns(backupConn, "CREATE RULE postdata_rule AS ON UPDATE TO postdata_metadata.foobar DO SELECT * FROM postdata_metadata.foobar;")
  1364  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON RULE postdata_rule ON postdata_metadata.foobar IS 'hello';")
  1365  
  1366  			// Create an event trigger. Currently for event triggers, there are 2 possible
  1367  			// pieces of metadata: ENABLE and COMMENT.
  1368  			testhelper.AssertQueryRuns(backupConn, "CREATE OR REPLACE FUNCTION postdata_metadata.postdata_eventtrigger_func() RETURNS event_trigger AS $$ BEGIN END $$ LANGUAGE plpgsql;")
  1369  			testhelper.AssertQueryRuns(backupConn, "CREATE EVENT TRIGGER postdata_eventtrigger ON sql_drop EXECUTE PROCEDURE postdata_metadata.postdata_eventtrigger_func();")
  1370  			testhelper.AssertQueryRuns(backupConn, "ALTER EVENT TRIGGER postdata_eventtrigger DISABLE;")
  1371  			testhelper.AssertQueryRuns(backupConn, "COMMENT ON EVENT TRIGGER postdata_eventtrigger IS 'hello'")
  1372  
  1373  			timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1374  				"--metadata-only")
  1375  			output := gprestore(gprestorePath, restoreHelperPath, timestamp,
  1376  				"--redirect-db", "restoredb", "--jobs", "8", "--verbose")
  1377  
  1378  			// The gprestore parallel postdata restore should have succeeded without a CRITICAL error.
  1379  			stdout := string(output)
  1380  			Expect(stdout).To(Not(ContainSubstring("CRITICAL")))
  1381  			Expect(stdout).To(Not(ContainSubstring("Error encountered when executing statement")))
  1382  		})
  1383  		Describe("Edge case tests", func() {
  1384  			It(`successfully backs up precise real data types`, func() {
  1385  				// Versions before 1.13.0 do not set the extra_float_digits GUC
  1386  				skipIfOldBackupVersionBefore("1.13.0")
  1387  
  1388  				tableName := "public.test_real_precision"
  1389  				tableNameCopy := "public.test_real_precision_copy"
  1390  				testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`CREATE TABLE %s (val real)`, tableName))
  1391  				defer testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`DROP TABLE %s`, tableName))
  1392  				testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`INSERT INTO %s VALUES (0.100001216)`, tableName))
  1393  				testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`CREATE TABLE %s AS SELECT * FROM %s`, tableNameCopy, tableName))
  1394  				defer testhelper.AssertQueryRuns(backupConn, fmt.Sprintf(`DROP TABLE %s`, tableNameCopy))
  1395  
  1396  				// We use --jobs flag to make sure all parallel connections have the GUC set properly
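        				// (The GUC in question is extra_float_digits; each parallel worker
        				// connection presumably needs the equivalent of `SET extra_float_digits = 3`
        				// for real values to round-trip at full precision.)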
  1397  				timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1398  					"--backup-dir", backupDir,
  1399  					"--dbname", "testdb", "--jobs", "2",
  1400  					"--include-table", fmt.Sprintf("%s", tableName),
  1401  					"--include-table", fmt.Sprintf("%s", tableNameCopy))
  1402  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1403  					"--redirect-db", "restoredb",
  1404  					"--backup-dir", backupDir)
  1405  				tableCount := dbconn.MustSelectString(restoreConn, fmt.Sprintf("SELECT count(*) FROM %s WHERE val = 0.100001216::real", tableName))
  1406  				Expect(tableCount).To(Equal(strconv.Itoa(1)))
  1407  				tableCopyCount := dbconn.MustSelectString(restoreConn, fmt.Sprintf("SELECT count(*) FROM %s WHERE val = 0.100001216::real", tableNameCopy))
  1408  				Expect(tableCopyCount).To(Equal(strconv.Itoa(1)))
  1409  			})
  1410  			It("does not retrieve trigger constraints with the rest of the constraints", func() {
  1411  				Skip("Cloudberry skip")
  1412  				if true {
  1413  					// TODO: Remove this once support is added
  1414  					Skip("Triggers on statements not yet supported in GPDB7, per src/backend/parser/gram.y:39460,39488")
  1415  				}
  1416  				testutils.SkipIfBefore6(backupConn)
  1417  				testhelper.AssertQueryRuns(backupConn,
  1418  					"CREATE TABLE table_multiple_constraints (a int)")
  1419  				defer testhelper.AssertQueryRuns(backupConn,
  1420  					"DROP TABLE IF EXISTS table_multiple_constraints CASCADE;")
  1421  
  1422  				// Add a trigger constraint
  1423  				testhelper.AssertQueryRuns(backupConn, `CREATE FUNCTION public.no_op_trig_fn() RETURNS trigger AS
  1424  $$begin RETURN NULL; end$$
  1425  LANGUAGE plpgsql NO SQL;`)
  1426  				defer testhelper.AssertQueryRuns(backupConn, `DROP FUNCTION IF EXISTS public.no_op_trig_fn() CASCADE`)
  1427  				testhelper.AssertQueryRuns(backupConn, "CREATE TRIGGER test_trigger AFTER INSERT ON public.table_multiple_constraints EXECUTE PROCEDURE public.no_op_trig_fn();")
  1428  
  1429  				// Add a non-trigger constraint
  1430  				testhelper.AssertQueryRuns(backupConn,
  1431  					"ALTER TABLE public.table_multiple_constraints ADD CONSTRAINT alter_table_with_primary_key_pkey PRIMARY KEY (a);")
  1432  
  1433  				// retrieve constraints, assert that only one is retrieved
  1434  				constraintsRetrieved := backup.GetConstraints(backupConn)
  1435  				Expect(len(constraintsRetrieved)).To(Equal(1))
  1436  
  1437  				// assert that the single retrieved constraint is the non-trigger constraint
  1438  				constraintRetrieved := constraintsRetrieved[0]
  1439  				Expect(constraintRetrieved.ConType).To(Equal("p"))
  1440  			})
  1441  			It("correctly distinguishes between domain and non-domain constraints", func() {
  1442  				testutils.SkipIfBefore6(backupConn)
  1443  				testhelper.AssertQueryRuns(backupConn,
  1444  					"CREATE TABLE table_multiple_constraints (a int)")
  1445  				defer testhelper.AssertQueryRuns(backupConn,
  1446  					"DROP TABLE IF EXISTS table_multiple_constraints CASCADE;")
  1447  
  1448  				// Add a domain with a constraint
  1449  				testhelper.AssertQueryRuns(backupConn, "CREATE DOMAIN public.const_domain1 AS text CONSTRAINT cons_check1 CHECK (char_length(VALUE) = 5);")
  1450  				defer testhelper.AssertQueryRuns(backupConn, `DROP DOMAIN IF EXISTS public.const_domain1;`)
  1451  
  1452  				// Add a non-trigger constraint
  1453  				testhelper.AssertQueryRuns(backupConn,
  1454  					"ALTER TABLE public.table_multiple_constraints ADD CONSTRAINT alter_table_with_primary_key_pkey PRIMARY KEY (a);")
  1455  
  1456  				// retrieve constraints, assert that two are retrieved, assert that the domain constraint is correctly categorized
  1457  				constraintsRetrieved := backup.GetConstraints(backupConn)
  1458  				Expect(len(constraintsRetrieved)).To(Equal(2))
  1459  				for _, constr := range constraintsRetrieved {
  1460  					if constr.Name == "cons_check1" {
  1461  						Expect(constr.IsDomainConstraint).To(Equal(true))
  1462  					} else if constr.Name == "alter_table_with_primary_key_pkey" {
  1463  						Expect(constr.IsDomainConstraint).To(Equal(false))
  1464  					} else {
  1465  						Fail("Unrecognized constraint in end-to-end test database")
  1466  					}
  1467  				}
  1468  			})
  1469  			It("backup and restore all data when NOT VALID option on constraints is specified", func() {
  1470  				testutils.SkipIfBefore6(backupConn)
  1471  				testhelper.AssertQueryRuns(backupConn,
  1472  					"CREATE TABLE legacy_table_violate_constraints (a int)")
  1473  				defer testhelper.AssertQueryRuns(backupConn,
  1474  					"DROP TABLE legacy_table_violate_constraints")
  1475  				testhelper.AssertQueryRuns(backupConn,
  1476  					"INSERT INTO legacy_table_violate_constraints values (0), (1), (2), (3), (4), (5), (6), (7)")
  1477  				testhelper.AssertQueryRuns(backupConn,
  1478  					"ALTER TABLE legacy_table_violate_constraints ADD CONSTRAINT new_constraint_not_valid CHECK (a > 4) NOT VALID")
  1479  				defer testhelper.AssertQueryRuns(backupConn,
  1480  					"ALTER TABLE legacy_table_violate_constraints DROP CONSTRAINT new_constraint_not_valid")
  1481  
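        				// NOT VALID skips validation of existing rows and only enforces the
        				// constraint for new writes, so the pre-existing rows violating
        				// CHECK (a > 4) must still be backed up and restored intact.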
  1482  				timestamp := gpbackup(gpbackupPath, backupHelperPath,
  1483  					"--backup-dir", backupDir)
  1484  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1485  					"--redirect-db", "restoredb",
  1486  					"--backup-dir", backupDir)
  1487  
  1488  				legacySchemaTupleCounts := map[string]int{
  1489  					`public."legacy_table_violate_constraints"`: 8,
  1490  				}
  1491  				assertDataRestored(restoreConn, legacySchemaTupleCounts)
  1492  
  1493  				isConstraintHere := dbconn.MustSelectString(restoreConn,
  1494  					"SELECT count(*) FROM pg_constraint WHERE conname='new_constraint_not_valid'")
  1495  				Expect(isConstraintHere).To(Equal(strconv.Itoa(1)))
  1496  
  1497  				_, err := restoreConn.Exec("INSERT INTO legacy_table_violate_constraints VALUES (1)")
  1498  				Expect(err).To(HaveOccurred())
  1499  				assertArtifactsCleaned(restoreConn, timestamp)
  1500  			})
  1501  			It("runs gpbackup and gprestore to backup tables depending on functions", func() {
  1502  				skipIfOldBackupVersionBefore("1.19.0")
  1503  				testhelper.AssertQueryRuns(backupConn, "CREATE FUNCTION func1(val integer) RETURNS integer AS $$ BEGIN RETURN val + 1; END; $$ LANGUAGE PLPGSQL;")
  1504  				defer testhelper.AssertQueryRuns(backupConn, "DROP FUNCTION func1(val integer);")
  1505  
  1506  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLE test_depends_on_function (id integer, claim_id character varying(20) DEFAULT ('WC-'::text || func1(10)::text)) DISTRIBUTED BY (id);")
  1507  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE test_depends_on_function;")
  1508  				testhelper.AssertQueryRuns(backupConn, "INSERT INTO test_depends_on_function values (1);")
  1509  				testhelper.AssertQueryRuns(backupConn, "INSERT INTO test_depends_on_function values (2);")
  1510  
  1511  				timestamp := gpbackup(gpbackupPath, backupHelperPath)
  1512  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1513  					"--redirect-db", "restoredb")
  1514  
  1515  				assertRelationsCreated(restoreConn, TOTAL_RELATIONS+1) // for new table
  1516  				assertDataRestored(restoreConn, schema2TupleCounts)
  1517  				assertDataRestored(restoreConn, map[string]int{
  1518  					"public.foo":                      40000,
  1519  					"public.holds":                    50000,
  1520  					"public.sales":                    13,
  1521  					"public.test_depends_on_function": 2})
  1522  				assertArtifactsCleaned(restoreConn, timestamp)
  1523  			})
  1524  			It("runs gpbackup and gprestore to backup functions depending on tables", func() {
  1525  				skipIfOldBackupVersionBefore("1.19.0")
  1526  
  1527  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLE to_use_for_function (n int);")
  1528  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE to_use_for_function;")
  1529  
  1530  				testhelper.AssertQueryRuns(backupConn, "INSERT INTO to_use_for_function values (1);")
  1531  				testhelper.AssertQueryRuns(backupConn, "CREATE FUNCTION func1(val integer) RETURNS integer AS $$ BEGIN RETURN val + (SELECT n FROM to_use_for_function); END; $$ LANGUAGE PLPGSQL;")
  1532  
  1533  				defer testhelper.AssertQueryRuns(backupConn, "DROP FUNCTION func1(val integer);")
  1534  
  1535  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLE test_depends_on_function (id integer, claim_id character varying(20) DEFAULT ('WC-'::text || func1(10)::text)) DISTRIBUTED BY (id);")
  1536  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE test_depends_on_function;")
  1537  				testhelper.AssertQueryRuns(backupConn, "INSERT INTO test_depends_on_function values (1);")
  1538  
  1539  				timestamp := gpbackup(gpbackupPath, backupHelperPath)
  1540  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1541  					"--redirect-db", "restoredb")
  1542  
  1543  				assertRelationsCreated(restoreConn, TOTAL_RELATIONS+2) // for 2 new tables
  1544  				assertDataRestored(restoreConn, schema2TupleCounts)
  1545  				assertDataRestored(restoreConn, map[string]int{
  1546  					"public.foo":                      40000,
  1547  					"public.holds":                    50000,
  1548  					"public.sales":                    13,
  1549  					"public.to_use_for_function":      1,
  1550  					"public.test_depends_on_function": 1})
  1551  
  1552  				assertArtifactsCleaned(restoreConn, timestamp)
  1553  			})
  1554  			It("Can restore xml with xmloption set to document", func() {
  1555  				testutils.SkipIfBefore6(backupConn)
  1556  				// Set up the XML table that contains XML content
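        				// 'fooxml' is bare character data: valid when parsed with xmloption set
        				// to content, but not valid as a standalone document, which is presumably
        				// what makes restoring it into document_db a meaningful check.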
  1557  				testhelper.AssertQueryRuns(backupConn, "CREATE TABLE xml_test AS SELECT xml 'fooxml'")
  1558  				defer testhelper.AssertQueryRuns(backupConn, "DROP TABLE xml_test")
  1559  
  1560  				// Set up database that has xmloption default to document instead of content
  1561  				testhelper.AssertQueryRuns(backupConn, "CREATE DATABASE document_db")
  1562  				defer testhelper.AssertQueryRuns(backupConn, "DROP DATABASE document_db")
  1563  				testhelper.AssertQueryRuns(backupConn, "ALTER DATABASE document_db SET xmloption TO document")
  1564  
  1565  				timestamp := gpbackup(gpbackupPath, backupHelperPath, "--include-table", "public.xml_test")
  1566  
  1567  				gprestore(gprestorePath, restoreHelperPath, timestamp,
  1568  					"--redirect-db", "document_db")
  1569  			})
  1570  			It("does not hold lock on gp_segment_configuration when backup is in progress", func() {
  1571  				if useOldBackupVersion {
  1572  					Skip("This test is not needed for old backup versions")
  1573  				}
  1574  				// Block on pg_trigger, which gpbackup queries after gp_segment_configuration
  1575  				backupConn.MustExec("BEGIN; LOCK TABLE pg_trigger IN ACCESS EXCLUSIVE MODE")
  1576  
  1577  				args := []string{
  1578  					"--dbname", "testdb",
  1579  					"--backup-dir", backupDir,
  1580  					"--verbose"}
  1581  				cmd := exec.Command(gpbackupPath, args...)
  1582  
  1583  				backupConn.MustExec("COMMIT")
  1584  				anotherConn := testutils.SetupTestDbConn("testdb")
  1585  				defer anotherConn.Close()
  1586  				var lockCount int
  1587  				gpSegConfigQuery := `SELECT count(*) FROM pg_locks l, pg_class c, pg_namespace n WHERE l.relation = c.oid AND n.oid = c.relnamespace AND c.relname = 'gp_segment_configuration';`
  1588  				// Query synchronously so that lockCount is populated before it is
  1589  				// asserted on; a goroutine here would race with the assertion below.
  1590  				_ = anotherConn.Get(&lockCount, gpSegConfigQuery)
  1591  
  1592  				Expect(lockCount).To(Equal(0))
  1593  
  1594  				output, _ := cmd.CombinedOutput()
  1595  				stdout := string(output)
  1596  				Expect(stdout).To(ContainSubstring("Backup completed successfully"))
  1597  			})
  1598  			It("properly handles various implicit casts on pg_catalog.text", func() {
  1599  				if useOldBackupVersion {
  1600  					Skip("This test is not needed for old backup versions")
  1601  				}
  1602  				// casts already exist on 4X
  1603  				testutils.ExecuteSQLFile(backupConn, "resources/implicit_casts.sql")
  1604  
  1605  				args := []string{
  1606  					"--dbname", "testdb",
  1607  					"--backup-dir", backupDir,
  1608  					"--verbose"}
  1609  				cmd := exec.Command(gpbackupPath, args...)
  1610  
  1611  				output, _ := cmd.CombinedOutput()
  1612  				stdout := string(output)
  1613  				Expect(stdout).To(ContainSubstring("Backup completed successfully"))
  1614  			})
  1615  		})
  1616  	})
  1617  	Describe("Restore to a different-sized cluster", func() {
  1618  		if useOldBackupVersion {
  1619  			Skip("This test is not needed for old backup versions")
  1620  		}
  1621  		// The backups for these tests were taken on GPDB version 6.20.3+dev.4.g9a08259bd1 build dev.
  1622  		BeforeEach(func() {
  1623  			testutils.SkipIfBefore6(backupConn)
  1624  			testhelper.AssertQueryRuns(backupConn, "CREATE ROLE testrole;")
  1625  		})
  1626  		AfterEach(func() {
  1627  			testhelper.AssertQueryRuns(restoreConn, fmt.Sprintf("REASSIGN OWNED BY testrole TO %s;", backupConn.User))
  1628  			testhelper.AssertQueryRuns(restoreConn, "DROP ROLE testrole;")
  1629  		})
  1630  		DescribeTable("",
  1631  			func(fullTimestamp string, incrementalTimestamp string, tarBaseName string, isIncrementalRestore bool, isFilteredRestore bool, isSingleDataFileRestore bool) {
  1632  				Skip("Cloudberry skip")
  1633  				if isSingleDataFileRestore && segmentCount != 3 {
  1634  					Skip("Single data file resize restores currently require a 3-segment cluster to test.")
  1635  				}
  1636  				extractDirectory := path.Join(backupDir, tarBaseName)
  1637  				os.Mkdir(extractDirectory, 0777)
  1638  				command := exec.Command("tar", "-xzf", fmt.Sprintf("resources/%s.tar.gz", tarBaseName), "-C", extractDirectory)
  1639  				mustRunCommand(command)
  1640  				defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemaone CASCADE;`)
  1641  				defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schematwo CASCADE;`)
  1642  				defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemathree CASCADE;`)
  1643  
  1644  				// Move extracted data files to the proper directory for a larger-to-smaller restore, if necessary
  1645  				// Assumes all saved backups have a name in the format "N-segment-db-..." where N is the original cluster size
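        				// e.g. restoring a 9-segment backup to a 3-segment cluster maps files from
        				// demoDataDir3..demoDataDir8 onto demoDataDir0..demoDataDir2 via
        				// i%segmentCount (3->0, 4->1, 5->2, 6->0, 7->1, 8->2)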
  1646  				re := regexp.MustCompile("^([0-9]+)-.*")
  1647  				origSize, _ := strconv.Atoi(re.FindStringSubmatch(tarBaseName)[1])
  1648  				timestamps := []string{fullTimestamp, incrementalTimestamp}
  1649  				if origSize > segmentCount {
  1650  					for i := segmentCount; i < origSize; i++ {
  1651  						for _, ts := range timestamps {
  1652  							if ts != "" {
  1653  								dataFilePath := fmt.Sprintf("%s/demoDataDir%s/backups/%s/%s/%s", extractDirectory, "%d", ts[0:8], ts, "%s")
  1654  								files, _ := path.Glob(fmt.Sprintf(dataFilePath, i, "*"))
  1655  								for _, dataFile := range files {
  1656  									os.Rename(dataFile, fmt.Sprintf(dataFilePath, i%segmentCount, path.Base(dataFile)))
  1657  								}
  1658  							}
  1659  						}
  1660  					}
  1661  				}
  1662  
  1663  				// This block stops the test if it hangs.  It was introduced to prevent hangs causing timeout failures in Concourse CI.
  1664  				// Those flakes have stopped being observed and can no longer be reproduced.  Some changes have since been made
  1665  				// that may obviate the original cause of the hangs, but a definitive RCA was never accomplished.
  1666  				// This block is elegant and is kept around for now in case the hangs reappear.
  1667  				// TODO: if pipe-related hangs have not been observed by 6/1/2023, remove this code as it is not needed.
  1668  				// completed := make(chan bool)
  1669  				// defer func() { completed <- true }() // Whether the test succeeds or fails, mark it as complete
  1670  				// go func() {
  1671  				// 	// No test run has been observed to take more than a few minutes without a hang,
  1672  				// 	// so loop 5 times and check for success after 1 minute each
  1673  				// 	for i := 0; i < 5; i++ {
  1674  				// 		select {
  1675  				// 		case <-completed:
  1676  				// 			return
  1677  				// 		default:
  1678  				// 			time.Sleep(time.Minute)
  1679  				// 		}
  1680  				// 	}
  1681  				// 	// If we get here, this test is hanging, stop the processes.
  1682  				// 	// If the test succeeded or failed, we'll return before here.
  1683  				// 	_ = exec.Command("pkill", "-9", "gpbackup_helper").Run()
  1684  				// 	_ = exec.Command("pkill", "-9", "gprestore").Run()
  1685  				// }()
  1686  
  1687  				gprestoreArgs := []string{
  1688  					"--timestamp", fullTimestamp,
  1689  					"--redirect-db", "restoredb",
  1690  					"--backup-dir", extractDirectory,
  1691  					"--resize-cluster",
  1692  					"--on-error-continue"}
  1693  				if isFilteredRestore {
  1694  					gprestoreArgs = append(gprestoreArgs, "--include-schema", "schematwo")
  1695  				}
  1696  				gprestoreCmd := exec.Command(gprestorePath, gprestoreArgs...)
  1697  				output, err := gprestoreCmd.CombinedOutput()
  1698  				fmt.Println(string(output))
  1699  				Expect(err).ToNot(HaveOccurred())
  1700  
  1701  				// check row counts
  1702  				testutils.ExecuteSQLFile(restoreConn, "resources/test_rowcount_ddl.sql")
  1703  				rowcountsFilename := fmt.Sprintf("/tmp/%s-rowcounts.txt", tarBaseName)
  1704  				err = exec.Command("psql",
  1705  					"-d", "restoredb",
  1706  					"-c", "select * from cnt_rows();",
  1707  					"-o", rowcountsFilename).Run()
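        				// unMarshalRowCounts presumably parses the cnt_rows() output written by
        				// psql into a map of table name to row count, so the expected and actual
        				// results can be compared directly.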
  1708  				expectedRowMap := unMarshalRowCounts(fmt.Sprintf("resources/%d-segment-db-rowcounts.txt", segmentCount))
  1709  				actualRowMap := unMarshalRowCounts(rowcountsFilename)
  1710  				for key := range expectedRowMap {
  1711  					if strings.HasPrefix(key, "schemathree") {
  1712  						delete(expectedRowMap, key)
  1713  					} else if isFilteredRestore && !strings.HasPrefix(key, "schematwo") {
  1714  						delete(expectedRowMap, key)
  1715  					}
  1716  				}
  1717  				Expect(err).To(Not(HaveOccurred()))
  1718  				if !reflect.DeepEqual(expectedRowMap, actualRowMap) {
  1719  					Fail(fmt.Sprintf("Expected row count map for full restore\n\n\t%v\n\nto equal\n\n\t%v\n\n", actualRowMap, expectedRowMap))
  1720  				}
  1721  
  1722  				if isIncrementalRestore {
  1723  					// restore subsequent incremental backup
  1724  					gprestoreincrCmd := exec.Command(gprestorePath,
  1725  						"--timestamp", incrementalTimestamp,
  1726  						"--redirect-db", "restoredb",
  1727  						"--incremental",
  1728  						"--data-only",
  1729  						"--backup-dir", extractDirectory,
  1730  						"--resize-cluster",
  1731  						"--on-error-continue")
  1732  					incroutput, err := gprestoreincrCmd.CombinedOutput()
  1733  					fmt.Println(string(incroutput))
  1734  					Expect(err).ToNot(HaveOccurred())
  1735  
  1736  					// check row counts
  1737  					err = exec.Command("psql",
  1738  						"-d", "restoredb",
  1739  						"-c", "select * from cnt_rows();",
  1740  						"-o", rowcountsFilename).Run()
  1741  					expectedIncrRowMap := unMarshalRowCounts(fmt.Sprintf("resources/%d-segment-db-incremental-rowcounts.txt", segmentCount))
  1742  					actualIncrRowMap := unMarshalRowCounts(rowcountsFilename)
  1743  
  1744  					Expect(err).To(Not(HaveOccurred()))
  1745  					if !reflect.DeepEqual(expectedIncrRowMap, actualIncrRowMap) {
  1746  						Fail(fmt.Sprintf("Expected row count map for incremental restore\n%v\nto equal\n%v\n", actualIncrRowMap, expectedIncrRowMap))
  1747  					}
  1748  				}
  1749  			},
  1750  			Entry("Can backup a 9-segment cluster and restore to current cluster", "20220909090738", "", "9-segment-db", false, false, false),
  1751  			Entry("Can backup a 9-segment cluster and restore to current cluster with single data file", "20220909090827", "", "9-segment-db-single-data-file", false, false, true),
  1752  			Entry("Can backup a 9-segment cluster and restore to current cluster with incremental backups", "20220909150254", "20220909150353", "9-segment-db-incremental", true, false, false),
  1753  			Entry("Can backup a 7-segment cluster and restore to current cluster", "20220908145504", "", "7-segment-db", false, false, false),
  1754  			Entry("Can backup a 7-segment cluster and restore to current cluster with single data file", "20220912101931", "", "7-segment-db-single-data-file", false, false, true),
  1755  			Entry("Can backup a 7-segment cluster and restore to current cluster with a filter", "20220908145645", "", "7-segment-db-filter", false, true, false),
  1756  			Entry("Can backup a 7-segment cluster and restore to current cluster with single data file and filter", "20220912102413", "", "7-segment-db-single-data-file-filter", false, true, true),
  1757  			Entry("Can backup a 2-segment cluster and restore to current cluster with single data file and filter", "20220908150223", "", "2-segment-db-single-data-file-filter", false, true, true),
  1758  			Entry("Can backup a 2-segment cluster and restore to current cluster with single data file", "20220908150159", "", "2-segment-db-single-data-file", false, false, true),
  1759  			Entry("Can backup a 2-segment cluster and restore to current cluster with a filter", "20220908150238", "", "2-segment-db-filter", false, true, false),
  1760  			Entry("Can backup a 2-segment cluster and restore to current cluster with incremental backups and a single data file", "20220909150612", "20220909150622", "2-segment-db-incremental", true, false, false),
  1761  			Entry("Can backup a 1-segment cluster and restore to current cluster", "20220908150735", "", "1-segment-db", false, false, false),
  1762  			Entry("Can backup a 1-segment cluster and restore to current cluster with single data file", "20220908150752", "", "1-segment-db-single-data-file", false, false, true),
  1763  			Entry("Can backup a 1-segment cluster and restore to current cluster with a filter", "20220908150804", "", "1-segment-db-filter", false, true, false),
  1764  			Entry("Can backup a 3-segment cluster and restore to current cluster", "20220909094828", "", "3-segment-db", false, false, false),
  1765  		)
  1766  
  1767  		Describe("Restore from various-sized clusters with a replicated table", func() {
  1768  			if useOldBackupVersion {
  1769  				Skip("This test is not needed for old backup versions")
  1770  			}
  1771  			// The backups for these tests were taken on GPDB version 6.20.3+dev.4.g9a08259bd1 build dev.
  1772  			DescribeTable("",
  1773  				func(fullTimestamp string, tarBaseName string) {
  1774  
  1775  					testutils.SkipIfBefore6(backupConn)
  1776  					if true {
  1777  						Skip("Resize-cluster was only added in version 1.25")
  1778  					}
  1779  					extractDirectory := path.Join(backupDir, tarBaseName)
  1780  					os.Mkdir(extractDirectory, 0777)
  1781  					command := exec.Command("tar", "-xzf", fmt.Sprintf("resources/%s.tar.gz", tarBaseName), "-C", extractDirectory)
  1782  					mustRunCommand(command)
  1783  					defer testhelper.AssertQueryRuns(restoreConn, `DROP SCHEMA IF EXISTS schemaone CASCADE;`)
  1784  
  1785  					// Move extracted data files to the proper directory for a larger-to-smaller restore, if necessary
  1786  					// Assumes all saved backups have a name in the format "N-segment-db-..." where N is the original cluster size
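        					// (same i%segmentCount mapping as in the resize-cluster tests above)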
  1787  					re := regexp.MustCompile("^([0-9]+)-.*")
  1788  					origSize, _ := strconv.Atoi(re.FindStringSubmatch(tarBaseName)[1])
  1789  					if origSize > segmentCount {
  1790  						for i := segmentCount; i < origSize; i++ {
  1791  							dataFilePath := fmt.Sprintf("%s/demoDataDir%s/backups/%s/%s/%s", extractDirectory, "%d", fullTimestamp[0:8], fullTimestamp, "%s")
  1792  							files, _ := path.Glob(fmt.Sprintf(dataFilePath, i, "*"))
  1793  							for _, dataFile := range files {
  1794  								os.Rename(dataFile, fmt.Sprintf(dataFilePath, i%segmentCount, path.Base(dataFile)))
  1795  							}
  1796  						}
  1797  					}
  1798  
  1799  					gprestoreArgs := []string{
  1800  						"--timestamp", fullTimestamp,
  1801  						"--redirect-db", "restoredb",
  1802  						"--backup-dir", extractDirectory,
  1803  						"--resize-cluster",
  1804  						"--on-error-continue"}
  1805  
  1806  					gprestoreCmd := exec.Command(gprestorePath, gprestoreArgs...)
  1807  					output, err := gprestoreCmd.CombinedOutput()
  1808  					fmt.Println(string(output))
  1809  					Expect(err).ToNot(HaveOccurred())
  1810  
  1811  					// check row counts on each segment and on coordinator, expecting 1 table with 100 rows, replicated across all
  1812  					for _, seg := range backupCluster.Segments {
  1813  						if seg.ContentID != -1 {
  1814  							assertSegmentDataRestored(seg.ContentID, "schemaone.test_table", 100)
  1815  						}
  1816  					}
  1817  					assertDataRestored(restoreConn, map[string]int{
  1818  						"schemaone.test_table": 100,
  1819  					})
  1820  
  1821  					// check gp_distribution_policy at the end of the test to ensure numsegments matches the destination cluster size
  1822  					numSegments := dbconn.MustSelectString(restoreConn, "SELECT numsegments FROM gp_distribution_policy where localoid = 'schemaone.test_table'::regclass::oid")
  1823  					Expect(numSegments).To(Equal(strconv.Itoa(segmentCount)))
  1824  
  1825  				},
  1826  //				Entry("Can backup a 1-segment cluster and restore to current cluster with replicated tables", "20221104023842", "1-segment-db-replicated"),
  1827  //				Entry("Can backup a 3-segment cluster and restore to current cluster with replicated tables", "20221104023611", "3-segment-db-replicated"),
  1828  //				Entry("Can backup a 9-segment cluster and restore to current cluster with replicated tables", "20221104025347", "9-segment-db-replicated"),
  1829  			)
  1830  		})
  1831  
  1832  		It("Will not restore to a different-size cluster if the SegmentCount of the backup is unknown", func() {
  1833  			Skip("Cloudberry skip")
  1834  			if useOldBackupVersion {
  1835  				Skip("This test is not needed for old backup versions")
  1836  			}
  1837  			// This backup set is identical to the 5-segment-db.tar.gz backup set, except that the
  1838  			// segmentcount parameter was removed from the config file in the coordinator data directory.
  1839  			command := exec.Command("tar", "-xzf", "resources/no-segment-count-db.tar.gz", "-C", backupDir)
  1840  			mustRunCommand(command)
  1841  
  1842  			gprestoreCmd := exec.Command(gprestorePath,
  1843  				"--timestamp", "20220415160842",
  1844  				"--redirect-db", "restoredb",
  1845  				"--backup-dir", path.Join(backupDir, "5-segment-db"),
  1846  				"--resize-cluster",
  1847  				"--on-error-continue")
  1848  			output, err := gprestoreCmd.CombinedOutput()
  1849  			Expect(err).To(HaveOccurred())
  1850  			Expect(string(output)).To(MatchRegexp("Segment count for backup with timestamp [0-9]+ is unknown, cannot restore using --resize-cluster flag"))
  1851  		})
  1852  		It("Will not restore to a different-size cluster without the appropriate flag", func() {
  1853  			Skip("Cloudberry skip")
  1854  			command := exec.Command("tar", "-xzf", "resources/5-segment-db.tar.gz", "-C", backupDir)
  1855  			mustRunCommand(command)
  1856  
  1857  			gprestoreCmd := exec.Command(gprestorePath,
  1858  				"--timestamp", "20220415160842",
  1859  				"--redirect-db", "restoredb",
  1860  				"--backup-dir", path.Join(backupDir, "5-segment-db"),
  1861  				"--on-error-continue")
  1862  			output, err := gprestoreCmd.CombinedOutput()
  1863  			Expect(err).To(HaveOccurred())
  1864  			Expect(string(output)).To(ContainSubstring(fmt.Sprintf("Cannot restore a backup taken on a cluster with 5 segments to a cluster with %d segments unless the --resize-cluster flag is used.", segmentCount)))
  1865  		})
  1866  	})
  1867  	Describe("Restore indexes and constraints on exchanged partition tables", func() {
  1868  		BeforeEach(func() {
  1869  			testutils.SkipIfBefore6(backupConn)
  1870  			testhelper.AssertQueryRuns(backupConn, `
  1871                      CREATE SCHEMA schemaone;
  1872                      CREATE TABLE schemaone.part_table_for_upgrade (a INT, b INT) DISTRIBUTED BY (b) PARTITION BY RANGE(b) (PARTITION alpha  END (3), PARTITION beta START (3));
  1873  					CREATE INDEX upgrade_idx1 ON schemaone.part_table_for_upgrade(a) WHERE b > 10;
  1874  					ALTER TABLE schemaone.part_table_for_upgrade ADD PRIMARY KEY(a, b);
  1875  
  1876  					CREATE TABLE schemaone.like_table (like schemaone.part_table_for_upgrade INCLUDING CONSTRAINTS INCLUDING INDEXES) DISTRIBUTED BY (b);
  1877                      ALTER TABLE schemaone.part_table_for_upgrade EXCHANGE PARTITION beta WITH TABLE schemaone.like_table;`)
  1878  		})
  1879  		AfterEach(func() {
  1880  			testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA schemaone CASCADE;")
  1881  			testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA schemaone CASCADE;")
  1882  		})
  1883  
  1884  		It("Automatically updates index names correctly", func() {
  1885  			timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  1886  
  1887  			gprestoreArgs := []string{
  1888  				"--timestamp", timestamp,
  1889  				"--redirect-db", "restoredb",
  1890  				"--backup-dir", backupDir}
  1891  			gprestoreCmd := exec.Command(gprestorePath, gprestoreArgs...)
  1892  			_, err := gprestoreCmd.CombinedOutput()
  1893  			Expect(err).ToNot(HaveOccurred())
  1894  
  1895  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
  1896  
  1897  			// Indexes do not need to be renamed on partition exchange in GPDB7+ due to new syntax.
  1898  			expectedValue := false
  1899  			indexSuffix := "idx"
  1900  			Expect(strings.Contains(string(metadataFileContents), fmt.Sprintf("CREATE INDEX like_table_a_%s ON schemaone.like_table USING btree (a) WHERE (b > 10);",
  1901  				indexSuffix))).To(Equal(expectedValue))
  1902  			Expect(strings.Contains(string(metadataFileContents), fmt.Sprintf("CREATE INDEX part_table_for_upgrade_1_prt_beta_a_%s ON schemaone.like_table USING btree (a) WHERE (b > 10);",
  1903  				indexSuffix))).ToNot(Equal(expectedValue))
  1904  		})
  1905  
  1906  		It("Automatically updates constraint names correctly", func() {
  1907  			timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  1908  			gprestoreArgs := []string{
  1909  				"--timestamp", timestamp,
  1910  				"--redirect-db", "restoredb",
  1911  				"--backup-dir", backupDir}
  1912  			gprestoreCmd := exec.Command(gprestorePath, gprestoreArgs...)
  1913  			_, err := gprestoreCmd.CombinedOutput()
  1914  			Expect(err).ToNot(HaveOccurred())
  1915  
  1916  			// assert constraint names are what we expect
  1917  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
  1918  			Expect(strings.Contains(string(metadataFileContents), "ALTER TABLE ONLY schemaone.like_table ADD CONSTRAINT like_table_pkey PRIMARY KEY (a, b);")).To(BeTrue())
  1919  			Expect(strings.Contains(string(metadataFileContents), "ALTER TABLE ONLY schemaone.like_table ADD CONSTRAINT part_table_for_upgrade_pkey PRIMARY KEY (a, b);")).ToNot(BeTrue())
  1920  
  1921  		})
  1922  	})
  1923  	Describe("Backup and restore external partitions", func() {
  1924  		It("Will correctly handle external partitions on multiple versions of GPDB", func() {
  1925  			testutils.SkipIfBefore6(backupConn)
  1926  			testhelper.AssertQueryRuns(backupConn, "CREATE SCHEMA testchema;")
  1927  			defer testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA IF EXISTS testchema CASCADE;")
  1928  			defer testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA IF EXISTS testchema CASCADE;")
  1929  			testhelper.AssertQueryRuns(backupConn, `CREATE TABLE testchema.multipartition (a int,b date,c text,d int)
  1930                     DISTRIBUTED BY (a)
  1931                     PARTITION BY RANGE (b)
  1932                     SUBPARTITION BY LIST (c)
  1933                     SUBPARTITION TEMPLATE
  1934                     (SUBPARTITION usa values ('usa'),
  1935                     SUBPARTITION apj values ('apj'),
  1936                     SUBPARTITION eur values ('eur'))
  1937                     (PARTITION Jan16 START (date '2016-01-01') INCLUSIVE ,
  1938                       PARTITION Feb16 START (date '2016-02-01') INCLUSIVE ,
  1939                       PARTITION Mar16 START (date '2016-03-01') INCLUSIVE ,
  1940                       PARTITION Apr16 START (date '2016-04-01') INCLUSIVE ,
  1941                       PARTITION May16 START (date '2016-05-01') INCLUSIVE ,
  1942                       PARTITION Jun16 START (date '2016-06-01') INCLUSIVE ,
  1943                       PARTITION Jul16 START (date '2016-07-01') INCLUSIVE ,
  1944                       PARTITION Aug16 START (date '2016-08-01') INCLUSIVE ,
  1945                       PARTITION Sep16 START (date '2016-09-01') INCLUSIVE ,
  1946                       PARTITION Oct16 START (date '2016-10-01') INCLUSIVE ,
  1947                       PARTITION Nov16 START (date '2016-11-01') INCLUSIVE ,
  1948                       PARTITION Dec16 START (date '2016-12-01') INCLUSIVE
  1949                                       END (date '2017-01-01') EXCLUSIVE);
  1950                     CREATE EXTERNAL TABLE testchema.external_apj (a INT,b DATE,c TEXT,d INT) LOCATION ('gpfdist://127.0.0.1/apj') format 'text';
  1951                     ALTER TABLE testchema.multipartition ALTER PARTITION Dec16 EXCHANGE PARTITION apj WITH TABLE testchema.external_apj WITHOUT VALIDATION;
  1952                     `)
  1953  			timestamp := gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir)
  1954  
  1955  			metadataFileContents := getMetdataFileContents(backupDir, timestamp, "metadata.sql")
  1956  			Expect(metadataFileContents).ToNot(BeEmpty())
  1957  
  1958  			//GPDB7+ has new "attach table" partition syntax, does not require exchanging for external partitions
  1959  			Expect(string(metadataFileContents)).To(ContainSubstring("CREATE READABLE EXTERNAL TABLE testchema.multipartition_1_prt_dec16_2_prt_apj ("))
  1960  			Expect(string(metadataFileContents)).To(ContainSubstring("ALTER TABLE ONLY testchema.multipartition_1_prt_dec16 ATTACH PARTITION testchema.multipartition_1_prt_dec16_2_prt_apj FOR VALUES IN ('apj');"))
  1961  
  1962  			gprestoreArgs := []string{
  1963  				"--timestamp", timestamp,
  1964  				"--redirect-db", "restoredb",
  1965  				"--backup-dir", backupDir}
  1966  			gprestoreCmd := exec.Command(gprestorePath, gprestoreArgs...)
  1967  			_, err := gprestoreCmd.CombinedOutput()
  1968  			Expect(err).ToNot(HaveOccurred())
  1969  
  1970  		})
  1971  	})
  1972  	Describe("Backup and restore multi-layer leaf-partition backups filtered to parent or child tables with intermediate partitions on GPDB7+", func() {
  1973  		BeforeEach(func() {
  1974  			testutils.SkipIfBefore7(backupConn)
  1975  			testhelper.AssertQueryRuns(backupConn, "CREATE SCHEMA schemaone;")
  1976  			// load up a multi-level partitioned table with test data, to confirm that filtered backups capture and restore only the requested parts of the hierarchy
  1977  			testhelper.AssertQueryRuns(backupConn, `
  1978                        DROP TABLE IF EXISTS schemaone.measurement CASCADE;
  1979                        CREATE TABLE schemaone.measurement (
  1980                            city_id         int not null,
  1981                            logdate         date not null,
  1982                            peaktemp        int,
  1983                            unitsales       int default 42
  1984                        ) PARTITION BY RANGE (logdate);
  1985  
  1986                        ALTER TABLE schemaone.measurement ADD CONSTRAINT parent_city_id_unique UNIQUE (city_id, logdate, peaktemp, unitsales);
  1987  
  1988                        CREATE TABLE schemaone.measurement_y2006m02 PARTITION OF schemaone.measurement
  1989                            FOR VALUES FROM ('2006-02-01') TO ('2006-03-01')
  1990                            PARTITION BY RANGE (peaktemp);
  1991  
  1992                        ALTER TABLE schemaone.measurement_y2006m02 ADD CONSTRAINT intermediate_check CHECK (peaktemp < 1000);
  1993  
  1994                        CREATE TABLE schemaone.measurement_peaktemp_0_100 PARTITION OF schemaone.measurement_y2006m02
  1995                            FOR VALUES FROM (0) TO (100)
  1996                            PARTITION BY RANGE (unitsales);
  1997  
  1998                        CREATE TABLE schemaone.measurement_peaktemp_catchall PARTITION OF schemaone.measurement_peaktemp_0_100
  1999                            FOR VALUES FROM (1) TO (100);
  2000  
  2001                        CREATE TABLE schemaone.measurement_default PARTITION OF schemaone.measurement_y2006m02 DEFAULT;
  2002  
  2003                        CREATE TABLE schemaone.measurement_y2006m03 PARTITION OF schemaone.measurement
  2004                            FOR VALUES FROM ('2006-03-01') TO ('2006-04-01');
  2005  
  2006                        CREATE TABLE schemaone.measurement_y2007m11 PARTITION OF schemaone.measurement
  2007                            FOR VALUES FROM ('2007-11-01') TO ('2007-12-01');
  2008  
  2009                        CREATE TABLE schemaone.measurement_y2007m12 PARTITION OF schemaone.measurement
  2010                            FOR VALUES FROM ('2007-12-01') TO ('2008-01-01');
  2011  
  2012                        CREATE TABLE schemaone.measurement_y2008m01 PARTITION OF schemaone.measurement
  2013                            FOR VALUES FROM ('2008-01-01') TO ('2008-02-01');
  2014  
  2015                        ALTER TABLE schemaone.measurement_y2008m01 ADD CONSTRAINT city_id_unique UNIQUE (city_id);
  2016  
  2017                        INSERT INTO schemaone.measurement VALUES (42, '2006-02-22', 75, 80);
  2018                        INSERT INTO schemaone.measurement VALUES (42, '2006-03-05', 75, 80);
  2019                        INSERT INTO schemaone.measurement VALUES (42, '2007-12-22', 75, 80);
  2020                        INSERT INTO schemaone.measurement VALUES (42, '2007-12-20', 75, 80);
  2021                        INSERT INTO schemaone.measurement VALUES (42, '2007-11-20', 75, 80);
  2022                        INSERT INTO schemaone.measurement VALUES (42, '2006-02-01', 75, 99);
  2023                        INSERT INTO schemaone.measurement VALUES (42, '2006-02-22', 75, 60);
  2024                        INSERT INTO schemaone.measurement VALUES (42, '2007-11-15', 75, 80);
  2025                     `)
  2027  		})
  2028  
  2029  		AfterEach(func() {
  2030  			testhelper.AssertQueryRuns(backupConn, "DROP SCHEMA IF EXISTS schemaone CASCADE;")
  2031  			testhelper.AssertQueryRuns(restoreConn, "DROP SCHEMA IF EXISTS schemaone CASCADE;")
  2032  		})
  2033  		DescribeTable("",
  2034  			func(includeTableName string, secondaryIncludeTableName string, expectedTableCount string, expectedRootRowCount string, expectedLeafRowCount string) {
  2035  				var timestamp string
  2036  				if secondaryIncludeTableName != "" {
  2037  					timestamp = gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data",
  2038  						"--include-table", includeTableName,
  2039  						"--include-table", secondaryIncludeTableName)
  2040  				} else {
  2041  					timestamp = gpbackup(gpbackupPath, backupHelperPath, "--backup-dir", backupDir, "--leaf-partition-data", "--include-table", includeTableName)
  2042  				}
  2043  				testhelper.AssertQueryRuns(restoreConn, "CREATE SCHEMA schemaone;")
  2044  
  2045  				gprestoreArgs := []string{
  2046  					"--timestamp", timestamp,
  2047  					"--redirect-db", "restoredb",
  2048  					"--backup-dir", backupDir}
  2049  				gprestoreCmd := exec.Command(gprestorePath, gprestoreArgs...)
  2050  				_, err := gprestoreCmd.CombinedOutput()
  2051  				Expect(err).ToNot(HaveOccurred())
  2052  
  2053  				tableCount := dbconn.MustSelectString(restoreConn, "SELECT count(*) FROM information_schema.tables where table_schema = 'schemaone';")
  2054  				Expect(tableCount).To(Equal(expectedTableCount))
  2055  
  2056  				rootRowCount := dbconn.MustSelectString(restoreConn, "SELECT count(*) FROM schemaone.measurement;")
  2057  				Expect(rootRowCount).To(Equal(expectedRootRowCount))
  2058  
  2059  				leafRowCount := dbconn.MustSelectString(restoreConn, "SELECT count(*) FROM schemaone.measurement_peaktemp_catchall;")
  2060  				Expect(leafRowCount).To(Equal(expectedLeafRowCount))
  2061  			},
  2062  			Entry("Will correctly handle filtering on a child table", "schemaone.measurement_peaktemp_catchall", "", "4", "3", "3"),
  2063  			Entry("Will correctly handle filtering on the root table", "schemaone.measurement", "", "9", "8", "3"),
  2064  			Entry("Will correctly handle filtering on both root and child tables", "schemaone.measurement", "schemaone.measurement_peaktemp_catchall", "9", "8", "3"),
  2065  		)
  2066  	})
  2067  })