github.com/1aal/kubeblocks@v0.0.0-20231107070852-e1c03e598921/pkg/lorry/engines/postgres/officalpostgres/manager.go

/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd

This file is part of KubeBlocks project

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

package officalpostgres

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/spf13/afero"
	"github.com/spf13/cast"
	"golang.org/x/exp/slices"

	"github.com/1aal/kubeblocks/pkg/lorry/dcs"
	"github.com/1aal/kubeblocks/pkg/lorry/engines"
	"github.com/1aal/kubeblocks/pkg/lorry/engines/models"
	"github.com/1aal/kubeblocks/pkg/lorry/engines/postgres"
)

type Manager struct {
	postgres.Manager
	syncStandbys   *postgres.PGStandby          // cached sync-standby info; populated on the leader only
	recoveryParams map[string]map[string]string // cached primary_conninfo settings; populated on replicas only
	pgControlData  map[string]string            // cached pg_controldata output
}

var _ engines.DBManager = &Manager{}

var Mgr *Manager

var fs = afero.NewOsFs()

func NewManager(properties engines.Properties) (engines.DBManager, error) {
	Mgr = &Manager{}

	baseManager, err := postgres.NewManager(properties)
	if err != nil {
		return nil, errors.Errorf("new base manager failed, err: %v", err)
	}

	Mgr.Manager = *baseManager.(*postgres.Manager)
	return Mgr, nil
}

func (mgr *Manager) InitializeCluster(context.Context, *dcs.Cluster) error {
	return nil
}

func (mgr *Manager) IsCurrentMemberInCluster(context.Context, *dcs.Cluster) bool {
	return true
}

func (mgr *Manager) JoinCurrentMemberToCluster(context.Context, *dcs.Cluster) error {
	return nil
}

func (mgr *Manager) LeaveMemberFromCluster(context.Context, *dcs.Cluster, string) error {
	return nil
}

func (mgr *Manager) IsClusterInitialized(context.Context, *dcs.Cluster) (bool, error) {
	// For replication, the setup script imposes a constraint: the primary database (db0)
	// must start successfully before the remaining databases can launch.
	return mgr.IsDBStartupReady(), nil
}

func (mgr *Manager) cleanDBState() {
	mgr.UnsetIsLeader()
	mgr.recoveryParams = nil
	mgr.syncStandbys = nil
	mgr.pgControlData = nil
	mgr.DBState = &dcs.DBState{
		Extra: map[string]string{},
	}
}

func (mgr *Manager) GetDBState(ctx context.Context, cluster *dcs.Cluster) *dcs.DBState {
	mgr.cleanDBState()

	isLeader, err := mgr.IsLeader(ctx, cluster)
	if err != nil {
		mgr.Logger.Error(err, "check is leader failed")
		return nil
	}
	mgr.SetIsLeader(isLeader)

	replicationMode, err := mgr.getReplicationMode(ctx)
	if err != nil {
		mgr.Logger.Error(err, "get replication mode failed")
		return nil
	}
	mgr.DBState.Extra[postgres.ReplicationMode] = replicationMode

	if replicationMode == postgres.Synchronous && cluster.Leader != nil && cluster.Leader.Name == mgr.CurrentMemberName {
		syncStandbys := mgr.getSyncStandbys(ctx)
		if syncStandbys != nil {
			mgr.syncStandbys = syncStandbys
			mgr.DBState.Extra[postgres.SyncStandBys] = strings.Join(syncStandbys.Members.ToSlice(), ",")
		}
	}

	walPosition, err := mgr.getWalPositionWithHost(ctx, "")
	if err != nil {
		mgr.Logger.Error(err, "get wal position failed")
		return nil
	}
	mgr.DBState.OpTimestamp = walPosition

	timeLine := mgr.getTimeLineWithHost(ctx, "")
	if timeLine == 0 {
		mgr.Logger.Error(nil, "get timeline failed")
		return nil
	}
	mgr.DBState.Extra[postgres.TimeLine] = strconv.FormatInt(timeLine, 10)

	if !isLeader {
		recoveryParams, err := mgr.readRecoveryParams(ctx)
		if err != nil {
			mgr.Logger.Error(err, "get recoveryParams failed")
			return nil
		}
		mgr.recoveryParams = recoveryParams
	}

	pgControlData := mgr.getPgControlData()
	if pgControlData == nil {
		mgr.Logger.Error(nil, "get pg control data failed")
		return nil
	}
	mgr.pgControlData = pgControlData

	return mgr.DBState
}

func (mgr *Manager) IsLeader(ctx context.Context, cluster *dcs.Cluster) (bool, error) {
	isSet, isLeader := mgr.GetIsLeader()
	if isSet {
		return isLeader, nil
	}

	return mgr.IsLeaderWithHost(ctx, "")
}

func (mgr *Manager) IsLeaderWithHost(ctx context.Context, host string) (bool, error) {
	role, err := mgr.GetMemberRoleWithHost(ctx, host)
	if err != nil {
		return false, errors.Errorf("check is leader with host:%s failed, err:%v", host, err)
	}

	mgr.Logger.Info(fmt.Sprintf("get member:%s role:%s", host, role))
	return role == models.PRIMARY, nil
}

func (mgr *Manager) IsDBStartupReady() bool {
	if mgr.DBStartupReady {
		return true
	}

	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	if !mgr.IsPgReady(ctx) {
		return false
	}

	mgr.DBStartupReady = true
	mgr.Logger.Info("DB startup ready")
	return true
}

func (mgr *Manager) GetMemberRoleWithHost(ctx context.Context, host string) (string, error) {
	sql := "select pg_is_in_recovery();"

	resp, err := mgr.QueryWithHost(ctx, sql, host)
	if err != nil {
		mgr.Logger.Error(err, "get member role failed")
		return "", err
	}

	result, err := postgres.ParseQuery(string(resp))
	if err != nil {
		mgr.Logger.Error(err, "parse member role failed")
		return "", err
	}

	if cast.ToBool(result[0]["pg_is_in_recovery"]) {
		return models.SECONDARY, nil
	}
	return models.PRIMARY, nil
}

func (mgr *Manager) GetMemberAddrs(ctx context.Context, cluster *dcs.Cluster) []string {
	return cluster.GetMemberAddrs()
}

func (mgr *Manager) IsCurrentMemberHealthy(ctx context.Context, cluster *dcs.Cluster) bool {
	return mgr.IsMemberHealthy(ctx, cluster, cluster.GetMemberWithName(mgr.CurrentMemberName))
}

func (mgr *Manager) IsMemberHealthy(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) bool {
	var host string
	if member.Name != mgr.CurrentMemberName {
		host = cluster.GetMemberAddr(*member)
	}

	if cluster.Leader != nil && cluster.Leader.Name == member.Name {
		if !mgr.WriteCheck(ctx, host) {
			return false
		}
	}
	if !mgr.ReadCheck(ctx, host) {
		return false
	}

	return true
}

func (mgr *Manager) IsMemberLagging(ctx context.Context, cluster *dcs.Cluster, member *dcs.Member) (bool, int64) {
	if cluster.Leader == nil || cluster.Leader.DBState == nil {
		mgr.Logger.Info("No leader DBState info")
		return false, 0
	}
	maxLag := cluster.HaConfig.GetMaxLagOnSwitchover()

	var host string
	if member.Name != mgr.CurrentMemberName {
		host = cluster.GetMemberAddr(*member)
	}

	replicationMode, err := mgr.getReplicationMode(ctx)
	if err != nil {
		mgr.Logger.Error(err, "get db replication mode failed")
		return true, maxLag + 1
	}

	if replicationMode == postgres.Synchronous {
		if !mgr.checkStandbySynchronizedToLeader(true, cluster) {
			return true, maxLag + 1
		}
	}

	timeLine := mgr.getTimeLineWithHost(ctx, host)
	if timeLine == 0 {
		mgr.Logger.Error(nil, fmt.Sprintf("get timeline with host:%s failed", host))
		return true, maxLag + 1
	}
	clusterTimeLine := cast.ToInt64(cluster.Leader.DBState.Extra[postgres.TimeLine])
	if clusterTimeLine != 0 && clusterTimeLine != timeLine {
		return true, maxLag + 1
	}

	walPosition, err := mgr.getWalPositionWithHost(ctx, host)
	if err != nil {
		mgr.Logger.Error(err, "check member lagging failed")
		return true, maxLag + 1
	}

	lag := cluster.Leader.DBState.OpTimestamp - walPosition
	return lag > maxLag, lag
}

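// getReplicationMode maps the synchronous_commit setting to a replication mode:
// off/local/remote_write count as asynchronous, on/remote_apply as synchronous.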
// Typically, the synchronous_commit parameter remains consistent between the primary and standby.
func (mgr *Manager) getReplicationMode(ctx context.Context) (string, error) {
	if mgr.DBState != nil && mgr.DBState.Extra[postgres.ReplicationMode] != "" {
		return mgr.DBState.Extra[postgres.ReplicationMode], nil
	}

	synchronousCommit, err := mgr.GetPgCurrentSetting(ctx, "synchronous_commit")
	if err != nil {
		return "", err
	}

	switch synchronousCommit {
	case "off", "local", "remote_write":
		return postgres.Asynchronous, nil
	default: // "on" (the default), "remote_apply"
		return postgres.Synchronous, nil
	}
}

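// getWalPositionWithHost returns the member's WAL position as a byte offset:
// the current write LSN on a primary, or max(replay, receive) on a standby, so
// a standby that has received WAL it has not yet replayed still reports its
// full progress.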
func (mgr *Manager) getWalPositionWithHost(ctx context.Context, host string) (int64, error) {
	if mgr.DBState != nil && mgr.DBState.OpTimestamp != 0 && host == "" {
		return mgr.DBState.OpTimestamp, nil
	}

	var (
		lsn      int64
		isLeader bool
		err      error
	)

	if host == "" {
		isLeader, err = mgr.IsLeader(ctx, nil)
	} else {
		isLeader, err = mgr.IsLeaderWithHost(ctx, host)
	}
	if err != nil {
		return 0, err
	}

	if isLeader {
		lsn, err = mgr.getLsnWithHost(ctx, "current", host)
		if err != nil {
			return 0, err
		}
	} else {
		replayLsn, errReplay := mgr.getLsnWithHost(ctx, "replay", host)
		receiveLsn, errReceive := mgr.getLsnWithHost(ctx, "receive", host)
		if errReplay != nil && errReceive != nil {
			return 0, errors.Errorf("get replayLsn or receiveLsn failed, replayLsn err:%v, receiveLsn err:%v", errReplay, errReceive)
		}
		lsn = engines.MaxInt64(replayLsn, receiveLsn)
	}

	return lsn, nil
}

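// getLsnWithHost queries one of three LSNs as a byte offset. Diffing an LSN
// against '0/0' converts the "hi/lo" text form into a plain number that can be
// compared across members. Illustrative (not from this file):
//
//	select pg_wal_lsn_diff('0/3000148', '0/0');  -- 50331976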
func (mgr *Manager) getLsnWithHost(ctx context.Context, types string, host string) (int64, error) {
	var sql string
	switch types {
	case "current":
		sql = "select pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), '0/0')::bigint;"
	case "replay":
		sql = "select pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_replay_lsn(), '0/0')::bigint;"
	case "receive":
		sql = "select pg_catalog.pg_wal_lsn_diff(COALESCE(pg_catalog.pg_last_wal_receive_lsn(), '0/0'), '0/0')::bigint;"
	}

	resp, err := mgr.QueryWithHost(ctx, sql, host)
	if err != nil {
		mgr.Logger.Error(err, "get wal position failed")
		return 0, err
	}

	resMap, err := postgres.ParseQuery(string(resp))
	if err != nil {
		return 0, errors.Errorf("parse query response:%s failed, err:%v", string(resp), err)
	}

	return cast.ToInt64(resMap[0]["pg_wal_lsn_diff"]), nil
}

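// getSyncStandbys parses the synchronous_standby_names setting, which may look
// like (illustrative) "FIRST 1 (pg-cluster-1)" or "ANY 2 (s1, s2, s3)";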
// only the leader has this information.
func (mgr *Manager) getSyncStandbys(ctx context.Context) *postgres.PGStandby {
	if mgr.syncStandbys != nil {
		return mgr.syncStandbys
	}

	synchronousStandbyNames, err := mgr.GetPgCurrentSetting(ctx, "synchronous_standby_names")
	if err != nil {
		mgr.Logger.Error(err, "get synchronous_standby_names failed")
		return nil
	}

	syncStandbys, err := postgres.ParsePGSyncStandby(synchronousStandbyNames)
	if err != nil {
		mgr.Logger.Error(err, "parse pg sync standby failed")
		return nil
	}
	return syncStandbys
}

func (mgr *Manager) checkStandbySynchronizedToLeader(checkLeader bool, cluster *dcs.Cluster) bool {
	if cluster.Leader == nil || cluster.Leader.DBState == nil {
		return false
	}
	syncStandBysStr := cluster.Leader.DBState.Extra[postgres.SyncStandBys]
	syncStandBys := strings.Split(syncStandBysStr, ",")

	return (checkLeader && mgr.CurrentMemberName == cluster.Leader.Name) || slices.Contains(syncStandBys, mgr.CurrentMemberName)
}

func (mgr *Manager) handleRewind(ctx context.Context, cluster *dcs.Cluster) error {
	needRewind := mgr.checkTimelineAndLsn(ctx, cluster)
	if !needRewind {
		return nil
	}

	if !mgr.canRewind() {
		return nil
	}

	return mgr.executeRewind(ctx)
}

func (mgr *Manager) canRewind() bool {
	_, err := postgres.PgRewind("--help")
	if err != nil {
		mgr.Logger.Error(err, "unable to execute pg_rewind")
		return false
	}

	pgControlData := mgr.getPgControlData()
	if pgControlData["wal_log_hints setting"] != "on" && pgControlData["Data page checksum version"] == "0" {
		// pg_rewind requires wal_log_hints=on or data checksums enabled
		mgr.Logger.Info("unable to execute pg_rewind: the configuration does not allow it")
		return false
	}

	return true
}

func (mgr *Manager) executeRewind(ctx context.Context) error {
	if mgr.IsRunning() {
		return errors.New("can't run rewind when pg is running")
	}

	err := mgr.checkArchiveReadyWal(ctx)
	if err != nil {
		return err
	}

	// TODO: checkpoint

	return nil
}

func (mgr *Manager) checkArchiveReadyWal(ctx context.Context) error {
	archiveMode, _ := mgr.GetPgCurrentSetting(ctx, "archive_mode")
	archiveCommand, _ := mgr.GetPgCurrentSetting(ctx, "archive_command")

	if (archiveMode != "on" && archiveMode != "always") || archiveCommand == "" {
		mgr.Logger.Info("archive is not enabled")
		return nil
	}

	// since PostgreSQL 10, the WAL directory "pg_xlog" has been renamed to "pg_wal"
	walDir := mgr.DataDir + "/pg_wal/"
	archiveStatusDir := walDir + "archive_status/"
	var walFileList []string
	err := filepath.Walk(archiveStatusDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		fileName := strings.Split(info.Name(), ".")
		if len(fileName) == 2 && fileName[1] == "ready" {
			walFileList = append(walFileList, fileName[0])
		}

		return nil
	})
	if err != nil {
		return err
	}
	if len(walFileList) == 0 {
		mgr.Logger.Info("no ready wal file exists")
		return nil
	}

	sort.Strings(walFileList)
	for _, wal := range walFileList {
		_, err = postgres.ExecCommand(buildArchiverCommand(archiveCommand, wal, walDir))
		if err != nil {
			return err
		}

		err = fs.Rename(archiveStatusDir+wal+".ready", archiveStatusDir+wal+".done")
		if err != nil {
			return err
		}
	}

	return nil
}

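// buildArchiverCommand expands the %-placeholders PostgreSQL supports in
// archive_command: %p (path to the WAL file), %f (file name), %r (last restart
// point, hard-coded here to the first segment name), %% (literal %).
// Illustrative expansion with hypothetical values: archiveCommand
// "cp %p /mnt/archive/%f", walFileName "000000010000000000000002", and walDir
// "/data/pg_wal/" yield
//
//	cp /data/pg_wal/000000010000000000000002 /mnt/archive/000000010000000000000002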
func buildArchiverCommand(archiveCommand, walFileName, walDir string) string {
	cmd := ""

	i := 0
	archiveCommandLength := len(archiveCommand)
	for i < archiveCommandLength {
		if archiveCommand[i] == '%' && i+1 < archiveCommandLength {
			i += 1
			switch archiveCommand[i] {
			case 'p':
				cmd += walDir + walFileName
			case 'f':
				cmd += walFileName
			case 'r':
				cmd += "000000010000000000000001"
			case '%':
				cmd += "%"
			default:
				cmd += "%"
				i -= 1
			}
		} else {
			cmd += string(archiveCommand[i])
		}
		i += 1
	}

	return cmd
}

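// checkTimelineAndLsn decides whether this member's WAL has diverged from the
// primary and pg_rewind is required. A timeline history file entry has the
// form (illustrative) "1  0/3000148  no recovery target specified", meaning
// timeline 1 ended at switchpoint 0/3000148.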
func (mgr *Manager) checkTimelineAndLsn(ctx context.Context, cluster *dcs.Cluster) bool {
	var needRewind bool
	var history *postgres.HistoryFile

	isRecovery, localTimeLine, localLsn := mgr.getLocalTimeLineAndLsn(ctx)
	if localTimeLine == 0 || localLsn == 0 {
		return false
	}

	isLeader, err := mgr.IsLeaderWithHost(ctx, cluster.GetMemberAddr(*cluster.GetLeaderMember()))
	if err != nil || !isLeader {
		mgr.Logger.Error(err, "leader is still in recovery and can't rewind")
		return false
	}

	primaryTimeLine, err := mgr.getPrimaryTimeLine(cluster.GetMemberAddr(*cluster.GetLeaderMember()))
	if err != nil {
		mgr.Logger.Error(err, "get primary timeLine failed")
		return false
	}

	switch {
	case localTimeLine > primaryTimeLine:
		needRewind = true
	case localTimeLine == primaryTimeLine:
		needRewind = false
	case localTimeLine < primaryTimeLine:
		history = mgr.getHistory(cluster.GetMemberAddr(*cluster.GetLeaderMember()), primaryTimeLine)
	}

	if history != nil && len(history.History) != 0 {
		// track whether the loop found the local timeline before it ended
		exitFlag := false
		for _, h := range history.History {
			// No rewind is needed only when:
			// for a replica: the replayed location is not ahead of the switchpoint
			// for the former primary: the end of the checkpoint record equals the switchpoint
			if h.ParentTimeline == localTimeLine {
				switch {
				case isRecovery:
					needRewind = localLsn > h.SwitchPoint
				case localLsn >= h.SwitchPoint:
					needRewind = true
				default:
					checkPointEnd := mgr.getCheckPointEnd(localTimeLine, localLsn)
					needRewind = h.SwitchPoint != checkPointEnd
				}
				exitFlag = true
				break
			} else if h.ParentTimeline > localTimeLine {
				needRewind = true
				exitFlag = true
				break
			}
		}
		if !exitFlag {
			needRewind = true
		}
	}

	return needRewind
}

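// getCheckPointEnd derives where the shutdown checkpoint record ends: asking
// pg_waldump for two records starting at the checkpoint makes it fail on the
// record after it, and the error text carries the first invalid address, e.g.
// (illustrative) "error in WAL record at 0/6000028: invalid record length at
// 0/60000A0: wanted 24, got 0", from which ParsePgWalDumpError extracts the
// end LSN.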
func (mgr *Manager) getCheckPointEnd(timeLine, lsn int64) int64 {
	lsnStr := postgres.FormatPgLsn(lsn)

	resp, err := postgres.PgWalDump("-t", strconv.FormatInt(timeLine, 10), "-s", lsnStr, "-n", "2")
	if err == nil || resp == "" {
		return 0
	}

	checkPointEndStr := postgres.ParsePgWalDumpError(err.Error(), lsnStr)

	return postgres.ParsePgLsn(checkPointEndStr)
}

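// getPrimaryTimeLine runs IDENTIFY_SYSTEM over a replication connection; the
// indexing below assumes psql's aligned output, illustratively:
//
//	      systemid       | timeline |  xlogpos  | dbname
//	---------------------+----------+-----------+--------
//	 7297308529531958916 |        1 | 0/3000148 |
//
// line 2 is the data row and column 1 holds the timeline.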
func (mgr *Manager) getPrimaryTimeLine(host string) (int64, error) {
	resp, err := postgres.Psql("-h", host, "replication=database", "-c", "IDENTIFY_SYSTEM")
	if err != nil {
		mgr.Logger.Error(err, "get primary time line failed")
		return 0, err
	}

	stdoutList := strings.Split(resp, "\n")
	if len(stdoutList) < 3 {
		return 0, errors.Errorf("unexpected IDENTIFY_SYSTEM response:%s", resp)
	}
	values := strings.Split(stdoutList[2], "|")
	if len(values) < 2 {
		return 0, errors.Errorf("unexpected IDENTIFY_SYSTEM response:%s", resp)
	}

	primaryTimeLine := strings.TrimSpace(values[1])

	return strconv.ParseInt(primaryTimeLine, 10, 64)
}

func (mgr *Manager) getLocalTimeLineAndLsn(ctx context.Context) (bool, int64, int64) {
	var inRecovery bool

	if !mgr.IsRunning() {
		return mgr.getLocalTimeLineAndLsnFromControlData()
	}

	inRecovery = true
	timeLine := mgr.getReceivedTimeLine(ctx, "")
	lsn, _ := mgr.getLsnWithHost(ctx, "replay", "")

	return inRecovery, timeLine, lsn
}

func (mgr *Manager) getLocalTimeLineAndLsnFromControlData() (bool, int64, int64) {
	var inRecovery bool
	var timeLineStr, lsnStr string
	var timeLine, lsn int64

	pgControlData := mgr.getPgControlData()
	if slices.Contains([]string{"shut down in recovery", "in archive recovery"}, pgControlData["Database cluster state"]) {
		inRecovery = true
		lsnStr = pgControlData["Minimum recovery ending location"]
		timeLineStr = pgControlData["Min recovery ending loc's timeline"]
	} else if pgControlData["Database cluster state"] == "shut down" {
		inRecovery = false
		lsnStr = pgControlData["Latest checkpoint location"]
		timeLineStr = pgControlData["Latest checkpoint's TimeLineID"]
	}

	if lsnStr != "" {
		lsn = postgres.ParsePgLsn(lsnStr)
	}
	if timeLineStr != "" {
		timeLine, _ = strconv.ParseInt(timeLineStr, 10, 64)
	}

	return inRecovery, timeLine, lsn
}

func (mgr *Manager) getTimeLineWithHost(ctx context.Context, host string) int64 {
	if mgr.DBState != nil && mgr.DBState.Extra[postgres.TimeLine] != "" && host == "" {
		return cast.ToInt64(mgr.DBState.Extra[postgres.TimeLine])
	}

	var isLeader bool
	var err error
	if host == "" {
		isLeader, err = mgr.IsLeader(ctx, nil)
	} else {
		isLeader, err = mgr.IsLeaderWithHost(ctx, host)
	}
	if err != nil {
		mgr.Logger.Error(err, "get timeLine check leader failed")
		return 0
	}

	if isLeader {
		return mgr.getCurrentTimeLine(ctx, host)
	}
	return mgr.getReceivedTimeLine(ctx, host)
}

func (mgr *Manager) getCurrentTimeLine(ctx context.Context, host string) int64 {
	sql := "SELECT timeline_id FROM pg_control_checkpoint();"
	resp, err := mgr.QueryWithHost(ctx, sql, host)
	if err != nil || resp == nil {
		mgr.Logger.Error(err, "get current timeline failed")
		return 0
	}

	resMap, err := postgres.ParseQuery(string(resp))
	if err != nil {
		mgr.Logger.Error(err, "parse query response failed", "response", string(resp))
		return 0
	}

	return cast.ToInt64(resMap[0]["timeline_id"])
}

func (mgr *Manager) getReceivedTimeLine(ctx context.Context, host string) int64 {
	sql := "select case when latest_end_lsn is null then null " +
		"else received_tli end as received_tli from pg_catalog.pg_stat_get_wal_receiver();"
	resp, err := mgr.QueryWithHost(ctx, sql, host)
	if err != nil || resp == nil {
		mgr.Logger.Error(err, "get received timeline failed")
		return 0
	}

	resMap, err := postgres.ParseQuery(string(resp))
	if err != nil {
		mgr.Logger.Error(err, fmt.Sprintf("parse query response:%s failed", string(resp)))
		return 0
	}

	return cast.ToInt64(resMap[0]["received_tli"])
}

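// getPgControlData runs pg_controldata and parses its "key: value" lines,
// illustratively:
//
//	Database cluster state:               in production
//	Latest checkpoint's TimeLineID:       1
//	Latest checkpoint location:           0/6000028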
func (mgr *Manager) getPgControlData() map[string]string {
	if mgr.pgControlData != nil {
		return mgr.pgControlData
	}

	result := map[string]string{}

	resp, err := postgres.ExecCommand("pg_controldata")
	if err != nil {
		mgr.Logger.Error(err, "get pg control data failed")
		return nil
	}

	controlDataList := strings.Split(resp, "\n")
	for _, s := range controlDataList {
		// split on the first colon only: values such as timestamps contain colons themselves
		out := strings.SplitN(s, ":", 2)
		if len(out) == 2 {
			result[out[0]] = strings.TrimSpace(out[1])
		}
	}
	return result
}

func (mgr *Manager) checkRecoveryConf(ctx context.Context, leaderName string) (bool, bool) {
	if mgr.MajorVersion >= 12 {
		_, err := fs.Stat(mgr.DataDir + "/standby.signal")
		if errors.Is(err, afero.ErrFileNotFound) {
			return true, true
		}
	} else {
		mgr.Logger.Info("check recovery conf")
		// TODO: support checking recovery.conf
	}

	recoveryParams, err := mgr.readRecoveryParams(ctx)
	if err != nil {
		return true, true
	}

	if !strings.HasPrefix(recoveryParams[postgres.PrimaryConnInfo]["host"], leaderName) {
		if recoveryParams[postgres.PrimaryConnInfo]["context"] == "postmaster" {
			mgr.Logger.Info("host does not match, need to restart")
			return true, true
		}
		mgr.Logger.Info("host does not match, need to reload")
		return true, false
	}

	return false, false
}

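// readRecoveryParams reads primary_conninfo from pg_settings. For a setting
// such as (illustrative) "host=pg-cluster-0.pg-cluster-headless port=5432
// user=postgres application_name=pg-cluster-1", ParsePrimaryConnInfo yields a
// map of those key/value pairs; the pg_settings context column ("postmaster"
// vs. "sighup") determines whether a change needs a restart or just a reload.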
func (mgr *Manager) readRecoveryParams(ctx context.Context) (map[string]map[string]string, error) {
	if mgr.recoveryParams != nil {
		return mgr.recoveryParams, nil
	}

	sql := fmt.Sprintf(`SELECT name, setting, context FROM pg_catalog.pg_settings WHERE pg_catalog.lower(name) = '%s';`, postgres.PrimaryConnInfo)
	resp, err := mgr.Query(ctx, sql)
	if err != nil {
		return nil, err
	}

	resMap, err := postgres.ParseQuery(string(resp))
	if err != nil {
		return nil, err
	}

	primaryConnInfo := postgres.ParsePrimaryConnInfo(cast.ToString(resMap[0]["setting"]))
	primaryConnInfo["context"] = cast.ToString(resMap[0]["context"])

	return map[string]map[string]string{
		postgres.PrimaryConnInfo: primaryConnInfo,
	}, nil
}

func (mgr *Manager) getHistory(host string, timeline int64) *postgres.HistoryFile {
	resp, err := postgres.Psql("-h", host, "replication=database", "-c", fmt.Sprintf("TIMELINE_HISTORY %d", timeline))
	if err != nil {
		mgr.Logger.Error(err, "get history failed")
		return nil
	}

	return postgres.ParseHistory(resp)
}

func (mgr *Manager) Promote(ctx context.Context, cluster *dcs.Cluster) error {
	if isLeader, err := mgr.IsLeader(ctx, nil); isLeader && err == nil {
		mgr.Logger.Info("already the leader, no need to promote")
		return nil
	}

	resp, err := postgres.PgCtl("promote")
	if err != nil {
		mgr.Logger.Error(err, "promote failed")
		return err
	}

	mgr.Logger.Info("promote success", "response", resp)
	return nil
}

func (mgr *Manager) Demote(ctx context.Context) error {
	mgr.Logger.Info(fmt.Sprintf("current member demoting: %s", mgr.CurrentMemberName))
	if isLeader, err := mgr.IsLeader(ctx, nil); !isLeader && err == nil {
		mgr.Logger.Info("not the leader, no need to demote")
		return nil
	}

	return mgr.Stop()
}

func (mgr *Manager) Stop() error {
	err := mgr.DBManagerBase.Stop()
	if err != nil {
		return err
	}

	_, err = postgres.PgCtl("stop -m fast")
	if err != nil {
		mgr.Logger.Error(err, "pg_ctl stop failed")
		return err
	}

	return nil
}

func (mgr *Manager) Follow(ctx context.Context, cluster *dcs.Cluster) error {
	// the leader is likely to be nil only when the db is not running
	if cluster.Leader == nil {
		mgr.Logger.Info("cluster has no leader now, start the db first without following")
		return nil
	}

	err := mgr.handleRewind(ctx, cluster)
	if err != nil {
		mgr.Logger.Error(err, "handle rewind failed")
		return err
	}

	needChange, needRestart := mgr.checkRecoveryConf(ctx, cluster.Leader.Name)
	if needChange {
		return mgr.follow(ctx, needRestart, cluster)
	}

	mgr.Logger.Info(fmt.Sprintf("no action needed, already following the leader:%s", cluster.Leader.Name))
	return nil
}

func (mgr *Manager) follow(ctx context.Context, needRestart bool, cluster *dcs.Cluster) error {
	leaderMember := cluster.GetLeaderMember()
	if leaderMember == nil {
		mgr.Logger.Info("cluster has no leader now, just start if needed")
		if needRestart {
			return mgr.DBManagerBase.Start(ctx, cluster)
		}
		return nil
	}

	if mgr.CurrentMemberName == leaderMember.Name {
		mgr.Logger.Info("current member holds the leader key, no need to follow")
		return nil
	}

	primaryInfo := fmt.Sprintf("\nprimary_conninfo = 'host=%s port=%s user=%s password=%s application_name=%s'",
		cluster.GetMemberAddr(*leaderMember), leaderMember.DBPort, mgr.Config.Username, mgr.Config.Password, mgr.CurrentMemberName)

	pgConf, err := fs.OpenFile("/kubeblocks/conf/postgresql.conf", os.O_APPEND|os.O_RDWR, 0644)
	if err != nil {
		mgr.Logger.Error(err, "open postgresql.conf failed")
		return err
	}
	defer func() {
		_ = pgConf.Close()
	}()

	writer := bufio.NewWriter(pgConf)
	_, err = writer.WriteString(primaryInfo)
	if err != nil {
		mgr.Logger.Error(err, "write into postgresql.conf failed")
		return err
	}

	err = writer.Flush()
	if err != nil {
		mgr.Logger.Error(err, "writer flush failed")
		return err
	}

	if !needRestart {
		if err = mgr.PgReload(ctx); err != nil {
			mgr.Logger.Error(err, "reload conf failed")
			return err
		}
		return nil
	}

	return mgr.DBManagerBase.Start(ctx, cluster)
}

// Start, for PostgreSQL replication, does not just start a database instance:
// it also launches the member as a follower, replicating from the cluster leader.
func (mgr *Manager) Start(ctx context.Context, cluster *dcs.Cluster) error {
	err := mgr.follow(ctx, true, cluster)
	if err != nil {
		mgr.Logger.Error(err, "start failed")
		return err
	}
	return nil
}

func (mgr *Manager) HasOtherHealthyLeader(context.Context, *dcs.Cluster) *dcs.Member {
	return nil
}

func (mgr *Manager) HasOtherHealthyMembers(ctx context.Context, cluster *dcs.Cluster, leader string) []*dcs.Member {
	members := make([]*dcs.Member, 0)

	for i, m := range cluster.Members {
		if m.Name != leader && mgr.IsMemberHealthy(ctx, cluster, &m) {
			members = append(members, &cluster.Members[i])
		}
	}

	return members
}