github.com/cloudberrydb/gpbackup@v1.0.3-0.20240118031043-5410fd45eed6/restore/restore.go

package restore

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"runtime/debug"
	"strings"
	"sync"

	"github.com/cloudberrydb/gp-common-go-libs/cluster"
	"github.com/cloudberrydb/gp-common-go-libs/gplog"
	"github.com/cloudberrydb/gpbackup/filepath"
	"github.com/cloudberrydb/gpbackup/history"
	"github.com/cloudberrydb/gpbackup/options"
	"github.com/cloudberrydb/gpbackup/report"
	"github.com/cloudberrydb/gpbackup/toc"
	"github.com/cloudberrydb/gpbackup/utils"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// DoInit handles setup that can be done before parsing flags.
func DoInit(cmd *cobra.Command) {
	CleanupGroup = &sync.WaitGroup{}
	CleanupGroup.Add(1)
	gplog.InitializeLogging("gprestore", "")
	SetCmdFlags(cmd.Flags())
	_ = cmd.MarkFlagRequired(options.TIMESTAMP)
	utils.InitializeSignalHandler(DoCleanup, "restore process", &wasTerminated)
}

/*
 * DoValidation handles argument parsing and validation, e.g. checking that a passed filename exists.
 * It should only validate; initialization with any sort of side effects should go in DoInit or DoSetup.
 */
func DoValidation(cmd *cobra.Command) {
	ValidateFlagCombinations(cmd.Flags())
	err := utils.ValidateFullPath(MustGetFlagString(options.BACKUP_DIR))
	gplog.FatalOnError(err)
	err = utils.ValidateFullPath(MustGetFlagString(options.PLUGIN_CONFIG))
	gplog.FatalOnError(err)
	if !filepath.IsValidTimestamp(MustGetFlagString(options.TIMESTAMP)) {
		gplog.Fatal(errors.Errorf("Timestamp %s is invalid.  Timestamps must be in the format YYYYMMDDHHMMSS.", MustGetFlagString(options.TIMESTAMP)), "")
	}
}

// DoSetup handles setup that must be done after parsing flags.
func DoSetup() {
	SetLoggerVerbosity()
	gplog.Verbose("Restore Command: %s", os.Args)

	utils.CheckGpexpandRunning(utils.RestorePreventedByGpexpandMessage)
	restoreStartTime = history.CurrentTimestamp()
	backupTimestamp := MustGetFlagString(options.TIMESTAMP)
	gplog.Info("Restore Key = %s", backupTimestamp)

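	// Connect to the postgres maintenance database first; the restore target
	// database may not exist yet at this point.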
	CreateConnectionPool("postgres")

	var segPrefix string
	var err error
	opts, err = options.NewOptions(cmdFlags)
	gplog.FatalOnError(err)

	err = opts.QuoteIncludeRelations(connectionPool)
	gplog.FatalOnError(err)

	err = opts.QuoteExcludeRelations(connectionPool)
	gplog.FatalOnError(err)

	segConfig := cluster.MustGetSegmentConfiguration(connectionPool)
	globalCluster = cluster.NewCluster(segConfig)
	segPrefix, err = filepath.ParseSegPrefix(MustGetFlagString(options.BACKUP_DIR))
	gplog.FatalOnError(err)
	globalFPInfo = filepath.NewFilePathInfo(globalCluster, MustGetFlagString(options.BACKUP_DIR), backupTimestamp, segPrefix)

	// Get restore metadata from the plugin if one is configured; otherwise read it from the backup directory
	if MustGetFlagString(options.PLUGIN_CONFIG) != "" {
		RecoverMetadataFilesUsingPlugin()
	} else {
		InitializeBackupConfig()
	}

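	// Check that the backup's segment count is compatible with the target
	// cluster before doing any further work (a mismatch is only permitted
	// for an explicitly requested resize restore).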
	ValidateSafeToResizeCluster()

	gplog.Info("gpbackup version = %s", backupConfig.BackupVersion)
	gplog.Info("gprestore version = %s", GetVersion())
	gplog.Info("Database Version = %s", connectionPool.Version.VersionString)

	BackupConfigurationValidation()
	metadataFilename := globalFPInfo.GetMetadataFilePath()
	if !backupConfig.DataOnly {
		gplog.Verbose("Metadata will be restored from %s", metadataFilename)
	}
	unquotedRestoreDatabase := utils.UnquoteIdent(backupConfig.DatabaseName)
	if MustGetFlagString(options.REDIRECT_DB) != "" {
		unquotedRestoreDatabase = MustGetFlagString(options.REDIRECT_DB)
	}
	ValidateDatabaseExistence(unquotedRestoreDatabase, MustGetFlagBool(options.CREATE_DB), backupConfig.IncludeTableFiltered || backupConfig.DataOnly)
	if MustGetFlagBool(options.WITH_GLOBALS) {
		restoreGlobal(metadataFilename)
	} else if MustGetFlagBool(options.CREATE_DB) {
		createDatabase(metadataFilename)
	}
	if connectionPool != nil {
		connectionPool.Close()
	}
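	// Reconnect, this time to the restore target database, which is
	// guaranteed to exist by now.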
	InitializeConnectionPool(backupTimestamp, restoreStartTime, unquotedRestoreDatabase)

	/*
	 * We don't need to validate anything if we're creating the database; we
	 * should not error out for validation reasons once the restore database exists.
	 * For on-error-continue, we will see the same errors later when we try to run SQL,
	 * but since they will not stop the restore, it is not necessary to log them twice.
	 */
	if !MustGetFlagBool(options.CREATE_DB) && !MustGetFlagBool(options.ON_ERROR_CONTINUE) && !MustGetFlagBool(options.INCREMENTAL) {
		relationsToRestore := GenerateRestoreRelationList(*opts)
		if opts.RedirectSchema != "" {
			fqns, err := options.SeparateSchemaAndTable(relationsToRestore)
			gplog.FatalOnError(err)
			redirectRelationsToRestore := make([]string, 0)
			for _, fqn := range fqns {
				redirectRelationsToRestore = append(redirectRelationsToRestore, utils.MakeFQN(opts.RedirectSchema, fqn.TableName))
			}
			relationsToRestore = redirectRelationsToRestore
		}
		ValidateRelationsInRestoreDatabase(connectionPool, relationsToRestore)
	}

	if opts.RedirectSchema != "" {
		ValidateRedirectSchema(connectionPool, opts.RedirectSchema)
	}
}

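// DoRestore performs the restore proper: pre-data metadata, table data, and
// post-data metadata, followed by either restoring saved statistics or
// running ANALYZE on the restored tables, depending on the flags passed.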
func DoRestore() {
	var filteredDataEntries map[string][]toc.CoordinatorDataEntry
	metadataFilename := globalFPInfo.GetMetadataFilePath()
	isDataOnly := backupConfig.DataOnly || MustGetFlagBool(options.DATA_ONLY)
	isMetadataOnly := backupConfig.MetadataOnly || MustGetFlagBool(options.METADATA_ONLY)
	isIncremental := MustGetFlagBool(options.INCREMENTAL)

	if isIncremental {
		verifyIncrementalState()
	}

	if !isDataOnly && !isIncremental {
		restorePredata(metadataFilename)
	} else if isDataOnly {
		// The sequence setval commands need to be run during data-only restores, since
		// they are arguably the data of the sequence relations and can affect user tables
		// containing columns that reference those sequence relations.
		restoreSequenceValues(metadataFilename)
	}

	totalTablesRestored := 0
	if !isMetadataOnly {
		if MustGetFlagString(options.PLUGIN_CONFIG) == "" {
			VerifyBackupFileCountOnSegments()
		}
		totalTablesRestored, filteredDataEntries = restoreData()
	}

	if !isDataOnly && !isIncremental {
		restorePostdata(metadataFilename)
	}

	if MustGetFlagBool(options.WITH_STATS) && backupConfig.WithStatistics {
		restoreStatistics()
	} else if MustGetFlagBool(options.RUN_ANALYZE) && totalTablesRestored > 0 {
		runAnalyze(filteredDataEntries)
	}
}

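// createDatabase runs only the statements needed to create the target
// database along with its GUCs and metadata, substituting the redirect
// database name if one was given.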
func createDatabase(metadataFilename string) {
	objectTypes := []string{"SESSION GUCS", "DATABASE GUC", "DATABASE", "DATABASE METADATA"}
	dbName := backupConfig.DatabaseName
	gplog.Info("Creating database")
	statements := GetRestoreMetadataStatements("global", metadataFilename, objectTypes, []string{})
	if MustGetFlagString(options.REDIRECT_DB) != "" {
		quotedDBName := utils.QuoteIdent(connectionPool, MustGetFlagString(options.REDIRECT_DB))
		dbName = quotedDBName
		statements = toc.SubstituteRedirectDatabaseInStatements(statements, backupConfig.DatabaseName, quotedDBName)
	}
	numErrors := ExecuteRestoreMetadataStatements(statements, "", nil, utils.PB_NONE, false)

	if numErrors > 0 {
		gplog.Info("Database creation completed with failures for: %s", dbName)
	} else {
		gplog.Info("Database creation complete for: %s", dbName)
	}
}

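// restoreGlobal restores cluster-wide metadata (roles, role GUCs and grants,
// tablespaces, and resource queues/groups) from the global metadata file.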
func restoreGlobal(metadataFilename string) {
	objectTypes := []string{"SESSION GUCS", "DATABASE GUC", "DATABASE METADATA", "RESOURCE QUEUE", "RESOURCE GROUP", "ROLE", "ROLE GUCS", "ROLE GRANT", "TABLESPACE"}
	if MustGetFlagBool(options.CREATE_DB) {
		objectTypes = append(objectTypes, "DATABASE")
	}
	gplog.Info("Restoring global metadata")
	statements := GetRestoreMetadataStatements("global", metadataFilename, objectTypes, []string{})
	if MustGetFlagString(options.REDIRECT_DB) != "" {
		quotedDBName := utils.QuoteIdent(connectionPool, MustGetFlagString(options.REDIRECT_DB))
		statements = toc.SubstituteRedirectDatabaseInStatements(statements, backupConfig.DatabaseName, quotedDBName)
	}
	statements = toc.RemoveActiveRole(connectionPool.User, statements)
	numErrors := ExecuteRestoreMetadataStatements(statements, "Global objects", nil, utils.PB_VERBOSE, false)

	if numErrors > 0 {
		gplog.Info("Global database metadata restore completed with failures")
	} else {
		gplog.Info("Global database metadata restore complete")
	}
}

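// verifyIncrementalState checks that every schema and table in the latest
// restore plan entry already exists in the target database (unless excluded
// by user filters), since an incremental restore does not recreate metadata.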
func verifyIncrementalState() {
	lastRestorePlanEntry := backupConfig.RestorePlan[len(backupConfig.RestorePlan)-1]
	tableFQNsToRestore := lastRestorePlanEntry.TableFQNs

	existingSchemas, err := GetExistingSchemas()
	gplog.FatalOnError(err)
	existingTableFQNs, err := GetExistingTableFQNs()
	gplog.FatalOnError(err)

	existingSchemasMap := make(map[string]Empty)
	for _, schema := range existingSchemas {
		existingSchemasMap[schema] = Empty{}
	}
	existingTablesMap := make(map[string]Empty)
	for _, table := range existingTableFQNs {
		existingTablesMap[table] = Empty{}
	}

	var schemasToCreate []string
	var tableFQNsToCreate []string
	var schemasExcludedByUserInput []string
	var tablesExcludedByUserInput []string
	for _, table := range tableFQNsToRestore {
		schemaName := strings.Split(table, ".")[0]
		if utils.SchemaIsExcludedByUser(opts.IncludedSchemas, opts.ExcludedSchemas, schemaName) {
			if !utils.Exists(schemasExcludedByUserInput, schemaName) {
				schemasExcludedByUserInput = append(schemasExcludedByUserInput, schemaName)
			}
			tablesExcludedByUserInput = append(tablesExcludedByUserInput, table)
			continue
		}

		if _, exists := existingTablesMap[table]; !exists {
			if utils.RelationIsExcludedByUser(opts.IncludedRelations, opts.ExcludedRelations, table) {
				tablesExcludedByUserInput = append(tablesExcludedByUserInput, table)
			} else {
				_, schemaExists := existingSchemasMap[schemaName]
				preFilteredToCreate := utils.Exists(schemasToCreate, schemaName)
				if !schemaExists && !preFilteredToCreate {
					schemasToCreate = append(schemasToCreate, schemaName)
				}
				tableFQNsToCreate = append(tableFQNsToCreate, table)
			}
		}
	}

	var missing []string
	if len(schemasToCreate) > 0 && !MustGetFlagBool(options.ON_ERROR_CONTINUE) {
		missing = schemasToCreate
	}
	if len(tableFQNsToCreate) > 0 && !MustGetFlagBool(options.ON_ERROR_CONTINUE) {
		missing = append(missing, tableFQNsToCreate...)
	}
	if missing != nil {
		err = errors.Errorf("The following objects are missing from the target database: %v", missing)
		gplog.FatalOnError(err)
	}
}

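// restorePredata restores all pre-data metadata from the metadata file,
// creating schemas first and then all other pre-data objects, with any
// requested schema redirection applied.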
func restorePredata(metadataFilename string) {
	if wasTerminated {
		return
	}
	gplog.Info("Restoring pre-data metadata")
	// If this is not an incremental restore, assume the database is empty and filter based only on user input
	filters := NewFilters(opts.IncludedSchemas, opts.ExcludedSchemas, opts.IncludedRelations, opts.ExcludedRelations)
	var schemaStatements []toc.StatementWithType
	if opts.RedirectSchema == "" {
		schemaStatements = GetRestoreMetadataStatementsFiltered("predata", metadataFilename, []string{"SCHEMA"}, []string{}, filters)
	}
	statements := GetRestoreMetadataStatementsFiltered("predata", metadataFilename, []string{}, []string{"SCHEMA"}, filters)

	editStatementsRedirectSchema(statements, opts.RedirectSchema)
	progressBar := utils.NewProgressBar(len(schemaStatements)+len(statements), "Pre-data objects restored: ", utils.PB_VERBOSE)
	progressBar.Start()

	RestoreSchemas(schemaStatements, progressBar)
	numErrors := ExecuteRestoreMetadataStatements(statements, "Pre-data objects", progressBar, utils.PB_VERBOSE, false)

	progressBar.Finish()
	if wasTerminated {
		gplog.Info("Pre-data metadata restore incomplete")
	} else if numErrors > 0 {
		gplog.Info("Pre-data metadata restore completed with failures")
	} else {
		gplog.Info("Pre-data metadata restore complete")
	}
}

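// restoreSequenceValues extracts the setval() calls from the SEQUENCE
// metadata statements and runs only those, so that a data-only restore
// leaves sequences at the values they had at backup time.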
func restoreSequenceValues(metadataFilename string) {
	if wasTerminated {
		return
	}
	gplog.Info("Restoring sequence values")

	// If this is not an incremental restore, assume the database is empty and filter based only on user input
	filters := NewFilters(opts.IncludedSchemas, opts.ExcludedSchemas, opts.IncludedRelations, opts.ExcludedRelations)

	// Extract the setval calls from each SEQUENCE object's statements
	var sequenceValueStatements []toc.StatementWithType
	statements := GetRestoreMetadataStatementsFiltered("predata", metadataFilename, []string{"SEQUENCE"}, []string{}, filters)
	re := regexp.MustCompile(`SELECT pg_catalog.setval\(.*`)
	for _, statement := range statements {
		matches := re.FindStringSubmatch(statement.Statement)
		if len(matches) == 1 {
			statement.Statement = matches[0]
			sequenceValueStatements = append(sequenceValueStatements, statement)
		}
	}

	numErrors := int32(0)
	if len(sequenceValueStatements) == 0 {
		gplog.Verbose("No sequence values to restore")
	} else {
		progressBar := utils.NewProgressBar(len(sequenceValueStatements), "Sequence values restored: ", utils.PB_VERBOSE)
		progressBar.Start()
		numErrors = ExecuteRestoreMetadataStatements(sequenceValueStatements, "Sequence values", progressBar, utils.PB_VERBOSE, true)
		progressBar.Finish()
	}

	if wasTerminated {
		gplog.Info("Sequence values restore incomplete")
	} else if numErrors > 0 {
		gplog.Info("Sequence values restore completed with failures")
	} else {
		gplog.Info("Sequence values restore complete")
	}
}

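// editStatementsRedirectSchema rewrites each statement in place so that it
// targets redirectSchema instead of its original schema, replacing only the
// first occurrence of the "<schema>." prefix in the statement text. For
// example, with redirectSchema set to "target":
//
//	CREATE TABLE source.foo (i int);  =>  CREATE TABLE target.foo (i int);
//
// ALTER TABLE ... ATTACH PARTITION statements reference two relations, so
// the prefix is replaced once on each side of ATTACH PARTITION.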
func editStatementsRedirectSchema(statements []toc.StatementWithType, redirectSchema string) {
	if redirectSchema == "" {
		return
	}

	for i, statement := range statements {
		oldSchema := fmt.Sprintf("%s.", statement.Schema)
		newSchema := fmt.Sprintf("%s.", redirectSchema)
		statements[i].Schema = redirectSchema
		statements[i].Statement = strings.Replace(statement.Statement, oldSchema, newSchema, 1)

		// ALTER TABLE schema.root ATTACH PARTITION schema.leaf needs two schema replacements
		if statement.ObjectType == "TABLE" && statement.ReferenceObject != "" {
			alterTableAttachPart := strings.Split(statements[i].Statement, " ATTACH PARTITION ")

			if len(alterTableAttachPart) == 2 {
				statements[i].Statement = fmt.Sprintf(`%s ATTACH PARTITION %s`,
					alterTableAttachPart[0],
					strings.Replace(alterTableAttachPart[1], oldSchema, newSchema, 1))
			}
		}

		// only postdata statements will have a reference object
		if statement.ReferenceObject != "" {
			statements[i].ReferenceObject = strings.Replace(statement.ReferenceObject, oldSchema, newSchema, 1)
		}
	}
}

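// restoreData restores table data for every backup timestamp in the restore
// plan (or only the latest entry for an incremental restore) and returns the
// number of tables restored along with the matching data entries per
// timestamp.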
func restoreData() (int, map[string][]toc.CoordinatorDataEntry) {
	if wasTerminated {
		return -1, nil
	}
	restorePlan := backupConfig.RestorePlan
	restorePlanEntries := make([]history.RestorePlanEntry, 0)
	if MustGetFlagBool(options.INCREMENTAL) {
		restorePlanEntries = append(restorePlanEntries, restorePlan[len(restorePlan)-1])
	} else {
		restorePlanEntries = append(restorePlanEntries, restorePlan...)
	}

	totalTables := 0
	filteredDataEntries := make(map[string][]toc.CoordinatorDataEntry)
	for _, entry := range restorePlanEntries {
		fpInfo := GetBackupFPInfoForTimestamp(entry.Timestamp)
		tocfile := toc.NewTOC(fpInfo.GetTOCFilePath())
		restorePlanTableFQNs := entry.TableFQNs
		filteredDataEntriesForTimestamp := tocfile.GetDataEntriesMatching(opts.IncludedSchemas,
			opts.ExcludedSchemas, opts.IncludedRelations, opts.ExcludedRelations, restorePlanTableFQNs)
		filteredDataEntries[entry.Timestamp] = filteredDataEntriesForTimestamp
		totalTables += len(filteredDataEntriesForTimestamp)
	}
	dataProgressBar := utils.NewProgressBar(totalTables, "Tables restored: ", utils.PB_INFO)
	dataProgressBar.Start()

	gucStatements := setGUCsForConnection(nil, 0)
	numErrors := int32(0)
	for timestamp, entries := range filteredDataEntries {
		gplog.Verbose("Restoring data for %d tables from backup with timestamp: %s", len(entries), timestamp)
		// Accumulate errors across timestamps so that failures from an earlier
		// backup in the restore plan are not masked by a later, clean one
		numErrors += restoreDataFromTimestamp(GetBackupFPInfoForTimestamp(timestamp), entries, gucStatements, dataProgressBar)
	}

	dataProgressBar.Finish()
	if wasTerminated {
		gplog.Info("Data restore incomplete")
	} else if numErrors > 0 {
		gplog.Info("Data restore completed with failures")
	} else {
		gplog.Info("Data restore complete")
	}

	return totalTables, filteredDataEntries
}

func restorePostdata(metadataFilename string) {
	if wasTerminated {
		return
	}
	gplog.Info("Restoring post-data metadata")

	filters := NewFilters(opts.IncludedSchemas, opts.ExcludedSchemas, opts.IncludedRelations, opts.ExcludedRelations)

	statements := GetRestoreMetadataStatementsFiltered("postdata", metadataFilename, []string{}, []string{}, filters)
	editStatementsRedirectSchema(statements, opts.RedirectSchema)
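	// Post-data statements are executed in three batches; within each batch,
	// statements may run in parallel when multiple connections are available.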
	firstBatch, secondBatch, thirdBatch := BatchPostdataStatements(statements)
	progressBar := utils.NewProgressBar(len(statements), "Post-data objects restored: ", utils.PB_VERBOSE)
	progressBar.Start()

	numErrors := ExecuteRestoreMetadataStatements(firstBatch, "", progressBar, utils.PB_VERBOSE, connectionPool.NumConns > 1)
	numErrors += ExecuteRestoreMetadataStatements(secondBatch, "", progressBar, utils.PB_VERBOSE, connectionPool.NumConns > 1)
	numErrors += ExecuteRestoreMetadataStatements(thirdBatch, "", progressBar, utils.PB_VERBOSE, connectionPool.NumConns > 1)
	progressBar.Finish()

	if wasTerminated {
		gplog.Info("Post-data metadata restore incomplete")
	} else if numErrors > 0 {
		gplog.Info("Post-data metadata restore completed with failures")
	} else {
		gplog.Info("Post-data metadata restore complete")
	}
}

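// restoreStatistics replays the query planner statistics that were captured
// at backup time instead of recomputing them with ANALYZE.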
func restoreStatistics() {
	if wasTerminated {
		return
	}
	statisticsFilename := globalFPInfo.GetStatisticsFilePath()
	gplog.Info("Restoring query planner statistics from %s", statisticsFilename)

	filters := NewFilters(opts.IncludedSchemas, opts.ExcludedSchemas, opts.IncludedRelations, opts.ExcludedRelations)

	statements := GetRestoreMetadataStatementsFiltered("statistics", statisticsFilename, []string{}, []string{}, filters)
	editStatementsRedirectSchema(statements, opts.RedirectSchema)
	numErrors := ExecuteRestoreMetadataStatements(statements, "Table statistics", nil, utils.PB_VERBOSE, false)

	if numErrors > 0 {
		gplog.Info("Query planner statistics restore completed with failures")
	} else {
		gplog.Info("Query planner statistics restore complete")
	}
}

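// runAnalyze runs ANALYZE on every restored table so the query planner has
// fresh statistics, honoring any schema redirection when building table
// names.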
func runAnalyze(filteredDataEntries map[string][]toc.CoordinatorDataEntry) {
	if wasTerminated {
		return
	}
	gplog.Info("Running ANALYZE on restored tables")

	var analyzeStatements []toc.StatementWithType
	for _, dataEntries := range filteredDataEntries {
		for _, entry := range dataEntries {
			tableSchema := entry.Schema
			if opts.RedirectSchema != "" {
				tableSchema = opts.RedirectSchema
			}
			tableFQN := utils.MakeFQN(tableSchema, entry.Name)
			analyzeCommand := fmt.Sprintf("ANALYZE %s", tableFQN)

			newAnalyzeStatement := toc.StatementWithType{
				Schema:    tableSchema,
				Name:      entry.Name,
				Statement: analyzeCommand,
			}
			analyzeStatements = append(analyzeStatements, newAnalyzeStatement)
		}
	}

	// Only GPDB 5+ merges leaf partition stats up to the root automatically.
	// Against GPDB 4.3, we must extract the root partitions from the leaf
	// partition info and run ANALYZE ROOTPARTITION on them. These ANALYZE
	// ROOTPARTITION statements should run last, so add them to the end of
	// the analyzeStatements list.
	if connectionPool.Version.Is("4") {
		// Build a set of root partition statements to deduplicate them
		partitionRootSet := map[toc.StatementWithType]struct{}{}
		for _, dataEntries := range filteredDataEntries {
			for _, entry := range dataEntries {
				if entry.PartitionRoot != "" {
					tableSchema := entry.Schema
					if opts.RedirectSchema != "" {
						tableSchema = opts.RedirectSchema
					}
					rootFQN := utils.MakeFQN(tableSchema, entry.PartitionRoot)
					analyzeCommand := fmt.Sprintf("ANALYZE ROOTPARTITION %s", rootFQN)
					rootStatement := toc.StatementWithType{
						Schema:    tableSchema,
						Name:      entry.PartitionRoot,
						Statement: analyzeCommand,
					}
					partitionRootSet[rootStatement] = struct{}{}
				}
			}
		}

		for rootAnalyzeStatement := range partitionRootSet {
			analyzeStatements = append(analyzeStatements, rootAnalyzeStatement)
		}
	}

	progressBar := utils.NewProgressBar(len(analyzeStatements), "Tables analyzed: ", utils.PB_VERBOSE)
	progressBar.Start()
	numErrors := ExecuteStatements(analyzeStatements, progressBar, connectionPool.NumConns > 1)
	progressBar.Finish()

	if wasTerminated {
		gplog.Info("ANALYZE on restored tables incomplete")
	} else if numErrors > 0 {
		gplog.Info("ANALYZE on restored tables completed with failures")
	} else {
		gplog.Info("ANALYZE on restored tables complete")
	}
}

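// DoTeardown recovers from any panic, writes the restore report and error
// table files, runs plugin cleanup, and exits with the appropriate error
// code.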
func DoTeardown() {
	restoreFailed := false
	defer func() {
		DoCleanup(restoreFailed)

		errorCode := gplog.GetErrorCode()
		if errorCode == 0 {
			gplog.Info("Restore completed successfully")
		}
		os.Exit(errorCode)
	}()

	errStr := ""
	if err := recover(); err != nil {
		// gplog.Fatal sets error code 2; any other code means the panic did
		// not come from gplog.Fatal, so log it here
		if gplog.GetErrorCode() != 2 {
			gplog.Error(fmt.Sprintf("%v: %s", err, debug.Stack()))
			gplog.SetErrorCode(2)
		} else {
			errStr = fmt.Sprintf("%+v", err)
		}
		restoreFailed = true
	}
	if wasTerminated {
		/*
		 * Don't print an error if the restore was canceled, as the signal handler
		 * will take care of cleanup and return codes.  Just wait until the signal
		 * handler's DoCleanup completes so the main goroutine doesn't exit while
		 * cleanup is still in progress.
		 */
		CleanupGroup.Wait()
		restoreFailed = true
		return
	}
	if errStr != "" {
		fmt.Println(errStr)
	}
	errMsg := report.ParseErrorMessage(errStr)

	if globalFPInfo.Timestamp != "" {
		_, statErr := os.Stat(globalFPInfo.GetDirForContent(-1))
		if statErr != nil { // Even if this isn't os.IsNotExist, don't try to write a report file in case of further errors
			return
		}
		reportFilename := globalFPInfo.GetRestoreReportFilePath(restoreStartTime)
		origSize, destSize, _ := GetResizeClusterInfo()
		report.WriteRestoreReportFile(reportFilename, globalFPInfo.Timestamp, restoreStartTime, connectionPool, version, origSize, destSize, errMsg)
		report.EmailReport(globalCluster, globalFPInfo.Timestamp, reportFilename, "gprestore", !restoreFailed)
		if pluginConfig != nil {
			pluginConfig.CleanupPluginForRestore(globalCluster, globalFPInfo)
			pluginConfig.DeletePluginConfigWhenEncrypting(globalCluster)
		}
		if len(errorTablesMetadata) > 0 {
			// tables with metadata errors
			writeErrorTables(true)
		}
		if len(errorTablesData) > 0 {
			// tables with data errors
			writeErrorTables(false)
		}
	}
}

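// writeErrorTables writes the names of all tables that failed to restore
// (metadata or data, depending on the isMetadata flag) to a read-only error
// file for later inspection.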
func writeErrorTables(isMetadata bool) {
	var errorTables *map[string]Empty
	var errorFilename string

	if isMetadata {
		errorFilename = globalFPInfo.GetErrorTablesMetadataFilePath(restoreStartTime)
		errorTables = &errorTablesMetadata
		gplog.Verbose("Logging error tables during metadata restore in %s", errorFilename)
	} else {
		errorFilename = globalFPInfo.GetErrorTablesDataFilePath(restoreStartTime)
		errorTables = &errorTablesData
		gplog.Verbose("Logging error tables during data restore in %s", errorFilename)
	}

	errorFile, err := os.OpenFile(errorFilename, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	gplog.FatalOnError(err)
	errorWriter := bufio.NewWriter(errorFile)
	start := true
	for table := range *errorTables {
		if !start {
			_, _ = errorWriter.WriteString("\n")
		} else {
			start = false
		}
		_, _ = errorWriter.WriteString(table)
	}
	// Check the Flush error before Close so a buffered-write failure isn't
	// silently overwritten by the Close result
	err = errorWriter.Flush()
	gplog.FatalOnError(err)
	err = errorFile.Close()
	gplog.FatalOnError(err)
	err = os.Chmod(errorFilename, 0444)
	gplog.FatalOnError(err)
}

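// DoCleanup terminates lingering COPY sessions and gpbackup_helper
// processes, removes helper files from the cluster hosts, and closes the
// connection pool. It runs both during normal teardown and from the signal
// handler.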
func DoCleanup(restoreFailed bool) {
	defer func() {
		if err := recover(); err != nil {
			gplog.Warn("Encountered error during cleanup: %+v", err)
		}
		gplog.Verbose("Cleanup complete")
		CleanupGroup.Done()
	}()

	gplog.Verbose("Beginning cleanup")
	if backupConfig != nil && backupConfig.SingleDataFile {
		fpInfoList := GetBackupFPInfoListFromRestorePlan()
		for _, fpInfo := range fpInfoList {
			// COPY sessions must be terminated before cleaning up gpbackup_helper processes to avoid a potential deadlock.
			// If the terminate query is sent via a connection with an active COPY command, and the COPY's pipe is cleaned up, the COPY query will hang.
			// That would prevent the DoCleanup function passed to the signal handler from ever returning, blocking the os.Exit call.
			if wasTerminated { // These should all end on their own in a successful restore
				utils.TerminateHangingCopySessions(connectionPool, fpInfo, fmt.Sprintf("gprestore_%s_%s", fpInfo.Timestamp, restoreStartTime))
			}
			if restoreFailed {
				utils.CleanUpSegmentHelperProcesses(globalCluster, fpInfo, "restore")
			}
			utils.CleanUpHelperFilesOnAllHosts(globalCluster, fpInfo)
		}
	}

	if connectionPool != nil {
		connectionPool.Close()
	}
}