github.com/1aal/kubeblocks@v0.0.0-20231107070852-e1c03e598921/pkg/lorry/engines/postgres/officalpostgres/manager_test.go (about)

     1  /*
     2  Copyright (C) 2022-2023 ApeCloud Co., Ltd
     3  
     4  This file is part of KubeBlocks project
     5  
     6  This program is free software: you can redistribute it and/or modify
     7  it under the terms of the GNU Affero General Public License as published by
     8  the Free Software Foundation, either version 3 of the License, or
     9  (at your option) any later version.
    10  
    11  This program is distributed in the hope that it will be useful
    12  but WITHOUT ANY WARRANTY; without even the implied warranty of
    13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    14  GNU Affero General Public License for more details.
    15  
    16  You should have received a copy of the GNU Affero General Public License
    17  along with this program.  If not, see <http://www.gnu.org/licenses/>.
    18  */
    19  
    20  package officalpostgres
    21  
    22  import (
    23  	"bytes"
    24  	"context"
    25  	"fmt"
    26  	"strings"
    27  	"testing"
    28  
    29  	"github.com/pashagolub/pgxmock/v2"
    30  	"github.com/shirou/gopsutil/v3/process"
    31  	"github.com/spf13/afero"
    32  	"github.com/stretchr/testify/assert"
    33  
    34  	"github.com/1aal/kubeblocks/pkg/constant"
    35  	"github.com/1aal/kubeblocks/pkg/lorry/dcs"
    36  	"github.com/1aal/kubeblocks/pkg/lorry/engines"
    37  	"github.com/1aal/kubeblocks/pkg/lorry/engines/postgres"
    38  	viper "github.com/1aal/kubeblocks/pkg/viperx"
    39  )
    40  
    41  func MockDatabase(t *testing.T) (*Manager, pgxmock.PgxPoolIface, error) {
    42  	properties := map[string]string{
    43  		postgres.ConnectionURLKey: "user=test password=test host=localhost port=5432 dbname=postgres",
    44  	}
    45  	testConfig, err := postgres.NewConfig(properties)
    46  	assert.NotNil(t, testConfig)
    47  	assert.Nil(t, err)
    48  
    49  	viper.Set(constant.KBEnvPodName, "test-pod-0")
    50  	viper.Set(constant.KBEnvClusterCompName, "test")
    51  	viper.Set(constant.KBEnvNamespace, "default")
    52  	viper.Set(postgres.PGDATA, "test")
    53  	viper.Set(postgres.PGMAJOR, 14)
    54  	mock, err := pgxmock.NewPool(pgxmock.MonitorPingsOption(true))
    55  	if err != nil {
    56  		t.Fatal(err)
    57  	}
    58  
    59  	dbManager, err := NewManager(engines.Properties(properties))
    60  	if err != nil {
    61  		t.Fatal(err)
    62  	}
    63  	manager := dbManager.(*Manager)
    64  	manager.Pool = mock
    65  
    66  	return manager, mock, err
    67  }
    68  
// TestIsLeader exercises Manager.IsLeader: the role is derived from the
// result of a pg_is_in_recovery query unless a cached value was set.
func TestIsLeader(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()

	t.Run("get member role primary", func(t *testing.T) {
		// pg_is_in_recovery = false -> this member is the primary (leader).
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(false))

		isLeader, err := manager.IsLeader(ctx, nil)
		assert.Nil(t, err)
		assert.Equal(t, true, isLeader)
	})

	t.Run("get member role secondary", func(t *testing.T) {
		// pg_is_in_recovery = true -> the member is a standby, not the leader.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(true))

		isLeader, err := manager.IsLeader(ctx, nil)
		assert.Nil(t, err)
		assert.Equal(t, false, isLeader)
	})

	t.Run("query failed", func(t *testing.T) {
		// A query error propagates and the result defaults to false.
		mock.ExpectQuery("select").
			WillReturnError(fmt.Errorf("some error"))

		isLeader, err := manager.IsLeader(ctx, nil)
		assert.NotNil(t, err)
		assert.Equal(t, false, isLeader)
	})

	t.Run("parse query failed", func(t *testing.T) {
		// A result set with no rows cannot be parsed -> error, false.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}))
		isLeader, err := manager.IsLeader(ctx, nil)
		assert.NotNil(t, err)
		assert.Equal(t, false, isLeader)
	})

	t.Run("has set isLeader", func(t *testing.T) {
		// With the cached flag set, IsLeader answers without querying
		// (no mock expectation is registered here).
		manager.SetIsLeader(true)
		isLeader, err := manager.IsLeader(ctx, nil)
		assert.Nil(t, err)
		assert.Equal(t, true, isLeader)
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   120  
   121  func TestIsClusterInitialized(t *testing.T) {
   122  	ctx := context.TODO()
   123  	manager, mock, _ := MockDatabase(t)
   124  	defer mock.Close()
   125  	cluster := &dcs.Cluster{}
   126  
   127  	t.Run("DBStartup is set Ready", func(t *testing.T) {
   128  		manager.DBStartupReady = true
   129  
   130  		isInitialized, err := manager.IsClusterInitialized(ctx, cluster)
   131  		if err != nil {
   132  			t.Errorf("exepect check is cluster initialized success but failed")
   133  		}
   134  
   135  		assert.True(t, isInitialized)
   136  		manager.DBStartupReady = false
   137  	})
   138  
   139  	t.Run("DBStartup is not set ready and ping success", func(t *testing.T) {
   140  		mock.ExpectPing()
   141  		isInitialized, err := manager.IsClusterInitialized(ctx, cluster)
   142  		if err != nil {
   143  			t.Errorf("exepect check is cluster initialized success but failed")
   144  		}
   145  
   146  		if err = mock.ExpectationsWereMet(); err != nil {
   147  			t.Errorf("there were unfulfilled expectations: %v", err)
   148  		}
   149  
   150  		assert.True(t, isInitialized)
   151  		manager.DBStartupReady = false
   152  	})
   153  
   154  	t.Run("DBStartup is not set ready but ping failed", func(t *testing.T) {
   155  		isInitialized, err := manager.IsClusterInitialized(ctx, cluster)
   156  		if err != nil {
   157  			t.Errorf("exepect check is cluster initialized success but failed")
   158  		}
   159  
   160  		assert.False(t, isInitialized)
   161  		manager.DBStartupReady = false
   162  	})
   163  }
   164  
   165  func TestGetMemberAddrs(t *testing.T) {
   166  	ctx := context.TODO()
   167  	manager, mock, _ := MockDatabase(t)
   168  	defer mock.Close()
   169  	cluster := &dcs.Cluster{Namespace: "default"}
   170  
   171  	t.Run("get empty addrs", func(t *testing.T) {
   172  		addrs := manager.GetMemberAddrs(ctx, cluster)
   173  
   174  		assert.Equal(t, []string{}, addrs)
   175  	})
   176  
   177  	t.Run("get addrs", func(t *testing.T) {
   178  		cluster.ClusterCompName = "pg"
   179  		cluster.Members = append(cluster.Members, dcs.Member{
   180  			Name:   "test",
   181  			DBPort: "5432",
   182  		})
   183  		addrs := manager.GetMemberAddrs(ctx, cluster)
   184  
   185  		assert.Equal(t, 1, len(addrs))
   186  		assert.Equal(t, "test.pg-headless.default.svc.cluster.local:5432", addrs[0])
   187  	})
   188  }
   189  
// TestIsCurrentMemberHealthy exercises the read/write health checks.
// When the current member is the leader, a write check (create table +
// insert) runs before the read check; otherwise only the read check runs.
func TestIsCurrentMemberHealthy(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()
	cluster := &dcs.Cluster{
		Leader: &dcs.Leader{
			Name: manager.CurrentMemberName,
		},
	}
	cluster.Members = append(cluster.Members, dcs.Member{
		Name: manager.CurrentMemberName,
	})

	t.Run("current member is healthy", func(t *testing.T) {
		// Leader path: write check (exec) then read check (query) both succeed.
		mock.ExpectExec(`create table if not exists`).
			WillReturnResult(pgxmock.NewResult("CREATE TABLE", 0))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"check_ts"}).AddRow(1))

		isCurrentMemberHealthy := manager.IsCurrentMemberHealthy(ctx, cluster)
		assert.True(t, isCurrentMemberHealthy)
	})

	t.Run("write check failed", func(t *testing.T) {
		// The write check fails -> unhealthy; the read check is not reached.
		mock.ExpectExec(`create table if not exists`).
			WillReturnError(fmt.Errorf("some error"))

		isCurrentMemberHealthy := manager.IsCurrentMemberHealthy(ctx, cluster)
		assert.False(t, isCurrentMemberHealthy)
	})

	t.Run("read check failed", func(t *testing.T) {
		// Make the current member a non-leader so only the read check runs.
		cluster.Leader.Name = "test"
		mock.ExpectQuery("select").
			WillReturnError(fmt.Errorf("some error"))

		isCurrentMemberHealthy := manager.IsCurrentMemberHealthy(ctx, cluster)
		assert.False(t, isCurrentMemberHealthy)
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   234  
// TestGetReplicationMode maps each synchronous_commit setting to the
// expected replication mode, and checks the cached-DBState fast path.
func TestGetReplicationMode(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()
	// values[i] is the synchronous_commit setting; expects[i] is the mode
	// getReplicationMode should derive from it (off/local/remote_write ->
	// async; remote_apply/on/"" -> sync).
	values := []string{"off", "local", "remote_write", "remote_apply", "on", ""}
	expects := []string{postgres.Asynchronous, postgres.Asynchronous, postgres.Asynchronous, postgres.Synchronous, postgres.Synchronous, postgres.Synchronous}
	manager.DBState = &dcs.DBState{
		Extra: map[string]string{},
	}

	t.Run("parse query failed", func(t *testing.T) {
		// Empty result set -> parse error, empty mode.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}))

		res, err := manager.getReplicationMode(ctx)
		assert.NotNil(t, err)
		assert.Equal(t, "", res)
	})

	t.Run("synchronous_commit has not been set", func(t *testing.T) {
		// No cached value in DBState.Extra -> each call queries the DB.
		for i, v := range values {
			mock.ExpectQuery("select").
				WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow(v))

			res, err := manager.getReplicationMode(ctx)
			assert.Nil(t, err)
			assert.Equal(t, expects[i], res)
		}
	})

	t.Run("synchronous_commit has been set", func(t *testing.T) {
		// Cached value present -> returned directly, no query expected.
		for i, v := range expects {
			manager.DBState.Extra[postgres.ReplicationMode] = v
			res, err := manager.getReplicationMode(ctx)
			assert.Nil(t, err)
			assert.Equal(t, expects[i], res)
		}
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   278  
// TestGetWalPositionWithHost exercises WAL position retrieval for both
// roles: primaries use pg_current_wal_lsn, standbys compare the replay
// and receive LSNs, and a cached OpTimestamp short-circuits everything.
func TestGetWalPositionWithHost(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()

	t.Run("check is leader failed", func(t *testing.T) {
		// Leadership is undetermined and host "test" is unreachable -> error.
		res, err := manager.getWalPositionWithHost(ctx, "test")
		assert.NotNil(t, err)
		assert.Zero(t, res)
	})

	t.Run("get primary wal position success", func(t *testing.T) {
		manager.SetIsLeader(true)
		mock.ExpectQuery("pg_catalog.pg_current_wal_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454272))

		res, err := manager.getWalPositionWithHost(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, int64(23454272), res)
	})

	t.Run("get secondary wal position success", func(t *testing.T) {
		// Standby path queries both replay and receive LSN; the larger of
		// the two (23454273) is the reported position.
		manager.SetIsLeader(false)
		mock.ExpectQuery("pg_last_wal_replay_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454272))
		mock.ExpectQuery("pg_catalog.pg_last_wal_receive_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454273))

		res, err := manager.getWalPositionWithHost(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, int64(23454273), res)
	})

	t.Run("get primary wal position failed", func(t *testing.T) {
		manager.SetIsLeader(true)
		// Fresh DBState so no cached OpTimestamp masks the failure.
		manager.DBState = &dcs.DBState{}
		mock.ExpectQuery("pg_catalog.pg_current_wal_lsn()").
			WillReturnError(fmt.Errorf("some error"))

		res, err := manager.getWalPositionWithHost(ctx, "")
		assert.NotNil(t, err)
		assert.Zero(t, res)
	})

	t.Run("get secondary wal position failed", func(t *testing.T) {
		// Replay LSN errors and receive LSN returns no rows -> overall error.
		manager.SetIsLeader(false)
		mock.ExpectQuery("pg_last_wal_replay_lsn()").
			WillReturnError(fmt.Errorf("some error"))
		mock.ExpectQuery("pg_catalog.pg_last_wal_receive_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}))

		res, err := manager.getWalPositionWithHost(ctx, "")
		assert.NotNil(t, err)
		assert.Zero(t, res)
	})

	t.Run("op time has been set", func(t *testing.T) {
		// Cached OpTimestamp is returned directly; no query expected.
		manager.DBState = &dcs.DBState{
			OpTimestamp: 100,
		}

		res, err := manager.getWalPositionWithHost(ctx, "")
		assert.Nil(t, err)
		assert.Equal(t, int64(100), res)
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   349  
// TestGetSyncStandbys exercises parsing of the synchronous_standby_names
// setting into a PGStandby value, plus the cached fast path.
func TestGetSyncStandbys(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()

	t.Run("query failed", func(t *testing.T) {
		mock.ExpectQuery("select").
			WillReturnError(fmt.Errorf("some error"))

		standbys := manager.getSyncStandbys(ctx)
		assert.Nil(t, standbys)
	})

	t.Run("parse pg sync standby failed", func(t *testing.T) {
		// Malformed list (unquoted token next to a quoted one) -> nil.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow(`ANY 4("a" b,"c c")`))

		standbys := manager.getSyncStandbys(ctx)
		assert.Nil(t, standbys)
	})

	t.Run("get sync standbys success", func(t *testing.T) {
		// ANY 4("a",*,b): amount 4, contains member "a", and the wildcard
		// sets HasStar.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow(`ANY 4("a",*,b)`))

		standbys := manager.getSyncStandbys(ctx)
		assert.NotNil(t, standbys)
		assert.True(t, standbys.HasStar)
		assert.True(t, standbys.Members.Contains("a"))
		assert.Equal(t, 4, standbys.Amount)
	})

	t.Run("pg sync standbys has been set", func(t *testing.T) {
		// Cached syncStandbys + DBState present -> returned without a query.
		manager.DBState = &dcs.DBState{}
		manager.syncStandbys = &postgres.PGStandby{
			HasStar: true,
			Amount:  3,
		}

		standbys := manager.getSyncStandbys(ctx)
		assert.True(t, standbys.HasStar)
		assert.Equal(t, 3, standbys.Amount)
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   398  
   399  func TestCheckStandbySynchronizedToLeader(t *testing.T) {
   400  	cluster := &dcs.Cluster{
   401  		Leader: &dcs.Leader{
   402  			DBState: &dcs.DBState{
   403  				Extra: map[string]string{},
   404  			},
   405  		},
   406  	}
   407  
   408  	t.Run("synchronized to leader", func(t *testing.T) {
   409  		manager, _, _ := MockDatabase(t)
   410  		manager.CurrentMemberName = "a"
   411  		cluster.Leader.DBState.Extra[postgres.SyncStandBys] = "a,b,c"
   412  
   413  		ok := manager.checkStandbySynchronizedToLeader(true, cluster)
   414  		assert.True(t, ok)
   415  	})
   416  
   417  	t.Run("is leader", func(t *testing.T) {
   418  		manager, _, _ := MockDatabase(t)
   419  		manager.CurrentMemberName = "a"
   420  		cluster.Leader.Name = "a"
   421  		cluster.Leader.DBState.Extra[postgres.SyncStandBys] = "b,c"
   422  
   423  		ok := manager.checkStandbySynchronizedToLeader(true, cluster)
   424  		assert.True(t, ok)
   425  	})
   426  
   427  	t.Run("not synchronized to leader", func(t *testing.T) {
   428  		manager, _, _ := MockDatabase(t)
   429  		manager.CurrentMemberName = "d"
   430  		cluster.Leader.DBState.Extra[postgres.SyncStandBys] = "a,b,c"
   431  
   432  		ok := manager.checkStandbySynchronizedToLeader(true, cluster)
   433  		assert.False(t, ok)
   434  	})
   435  }
   436  
   437  func TestGetReceivedTimeLine(t *testing.T) {
   438  	ctx := context.TODO()
   439  	manager, mock, _ := MockDatabase(t)
   440  	defer mock.Close()
   441  
   442  	t.Run("get received timeline success", func(t *testing.T) {
   443  		mock.ExpectQuery("select").
   444  			WillReturnRows(pgxmock.NewRows([]string{"received_tli"}).AddRow(1))
   445  
   446  		timeLine := manager.getReceivedTimeLine(ctx, "")
   447  		assert.Equal(t, int64(1), timeLine)
   448  	})
   449  
   450  	t.Run("query failed", func(t *testing.T) {
   451  		mock.ExpectQuery("select").
   452  			WillReturnError(fmt.Errorf("some error"))
   453  
   454  		timeLine := manager.getReceivedTimeLine(ctx, "")
   455  		assert.Equal(t, int64(0), timeLine)
   456  	})
   457  
   458  	t.Run("parse query failed", func(t *testing.T) {
   459  		mock.ExpectQuery("select").
   460  			WillReturnRows(pgxmock.NewRows([]string{"received_tli"}))
   461  
   462  		timeLine := manager.getReceivedTimeLine(ctx, "")
   463  		assert.Equal(t, int64(0), timeLine)
   464  	})
   465  
   466  	if err := mock.ExpectationsWereMet(); err != nil {
   467  		t.Errorf("there were unfulfilled expectations: %v", err)
   468  	}
   469  }
   470  
// TestReadRecoveryParams exercises reading primary_conninfo from
// pg_settings and parsing it into a params map, plus the cached path.
func TestReadRecoveryParams(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()

	t.Run("host match", func(t *testing.T) {
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnRows(pgxmock.NewRows([]string{"name", "setting", "context"}).
				AddRow("primary_conninfo", "host=maple72-postgresql-0.maple72-postgresql-headless port=5432 application_name=my-application", "signup"))

		// The parsed host should start with the leader pod's name.
		leaderName := "maple72-postgresql-0"
		recoveryParams, err := manager.readRecoveryParams(ctx)
		assert.Nil(t, err)
		assert.True(t, strings.HasPrefix(recoveryParams[postgres.PrimaryConnInfo]["host"], leaderName))
	})

	t.Run("host not match", func(t *testing.T) {
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnRows(pgxmock.NewRows([]string{"name", "setting", "context"}).
				AddRow("primary_conninfo", "host=test port=5432 user=postgres application_name=my-application", "signup"))

		leaderName := "a"
		recoveryParams, err := manager.readRecoveryParams(ctx)
		assert.Nil(t, err)
		assert.False(t, strings.HasPrefix(recoveryParams[postgres.PrimaryConnInfo]["host"], leaderName))
	})

	t.Run("query failed", func(t *testing.T) {
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnError(fmt.Errorf("some error"))

		// On error the map is empty; indexing a nil/missing entry yields "".
		recoveryParams, err := manager.readRecoveryParams(ctx)
		assert.NotNil(t, err)
		assert.Equal(t, "", recoveryParams[postgres.PrimaryConnInfo]["host"])
	})

	t.Run("parse query failed", func(t *testing.T) {
		// Empty result set -> parse error.
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnRows(pgxmock.NewRows([]string{"name", "setting", "context"}))

		recoveryParams, err := manager.readRecoveryParams(ctx)
		assert.NotNil(t, err)
		assert.Equal(t, "", recoveryParams[postgres.PrimaryConnInfo]["host"])
	})

	t.Run("primary info has been set", func(t *testing.T) {
		// Cached recoveryParams are returned directly; no query expected.
		manager.recoveryParams = map[string]map[string]string{
			postgres.PrimaryConnInfo: {
				"host": "test",
			},
		}

		recoveryParams, err := manager.readRecoveryParams(ctx)
		assert.Nil(t, err)
		assert.Equal(t, "test", recoveryParams[postgres.PrimaryConnInfo]["host"])
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   532  
// TestCheckRecoveryConf exercises checkRecoveryConf, which decides whether
// the recovery configuration must change and whether a restart (vs a
// reload) is needed, based on standby.signal presence and primary_conninfo.
func TestCheckRecoveryConf(t *testing.T) {
	// Swap the package-level filesystem for an in-memory one so the
	// standby.signal checks never touch the real disk.
	fs = afero.NewMemMapFs()
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()

	t.Run("standby.signal not exist", func(t *testing.T) {
		needChange, needRestart := manager.checkRecoveryConf(ctx, manager.CurrentMemberName)
		assert.True(t, needChange)
		assert.True(t, needRestart)
	})

	// Create standby.signal so the remaining subtests reach the
	// primary_conninfo comparison.
	_, err := fs.Create(manager.DataDir + "/standby.signal")
	assert.Nil(t, err)

	t.Run("query primaryInfo failed", func(t *testing.T) {
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnError(fmt.Errorf("some error"))

		needChange, needRestart := manager.checkRecoveryConf(ctx, manager.CurrentMemberName)
		assert.True(t, needChange)
		assert.True(t, needRestart)
	})

	t.Run("host not match and restart", func(t *testing.T) {
		// context "postmaster" means the setting only applies after restart.
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnRows(pgxmock.NewRows([]string{"name", "setting", "context"}).
				AddRow("primary_conninfo", "host=maple72-postgresql-0.maple72-postgresql-headless port=5432 application_name=my-application", "postmaster"))

		needChange, needRestart := manager.checkRecoveryConf(ctx, manager.CurrentMemberName)
		assert.True(t, needChange)
		assert.True(t, needRestart)
	})

	t.Run("host not match and reload", func(t *testing.T) {
		// Non-postmaster context -> a reload suffices, no restart.
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnRows(pgxmock.NewRows([]string{"name", "setting", "context"}).
				AddRow("primary_conninfo", "host=maple72-postgresql-0.maple72-postgresql-headless port=5432 application_name=my-application", "signup"))

		needChange, needRestart := manager.checkRecoveryConf(ctx, manager.CurrentMemberName)
		assert.True(t, needChange)
		assert.False(t, needRestart)
	})

	t.Run("host match", func(t *testing.T) {
		// Host matches the current member (test-pod-0) -> nothing to change.
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnRows(pgxmock.NewRows([]string{"name", "setting", "context"}).
				AddRow("primary_conninfo", "host=test-pod-0.maple72-postgresql-headless port=5432 application_name=my-application", "signup"))

		needChange, needRestart := manager.checkRecoveryConf(ctx, manager.CurrentMemberName)
		assert.False(t, needChange)
		assert.False(t, needRestart)
	})

	if err = mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   591  
// TestIsMemberLagging walks the failure ladder of IsMemberLagging:
// missing leader DBState, replication-mode lookup failure, sync-standby
// mismatch, timeline lookup failure/mismatch, WAL position failure, and
// finally the non-lagging happy path.
func TestIsMemberLagging(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()
	cluster := &dcs.Cluster{
		HaConfig: &dcs.HaConfig{},
	}
	cluster.Members = append(cluster.Members, dcs.Member{
		Name: manager.CurrentMemberName,
	})
	currentMember := cluster.GetMemberWithName(manager.CurrentMemberName)

	t.Run("db state is nil", func(t *testing.T) {
		// No leader DBState to compare against -> treated as not lagging.
		isLagging, lag := manager.IsMemberLagging(ctx, cluster, currentMember)
		assert.False(t, isLagging)
		assert.Equal(t, int64(0), lag)
	})

	// Leader state used by all remaining subtests: op position 100 on
	// timeline 1.
	cluster.Leader = &dcs.Leader{
		DBState: &dcs.DBState{
			OpTimestamp: 100,
			Extra: map[string]string{
				postgres.TimeLine: "1",
			},
		},
	}

	t.Run("get replication mode failed", func(t *testing.T) {
		mock.ExpectQuery("select").
			WillReturnError(fmt.Errorf("some error"))

		isLagging, lag := manager.IsMemberLagging(ctx, cluster, currentMember)
		assert.True(t, isLagging)
		assert.Equal(t, int64(1), lag)
	})

	t.Run("not sync to leader", func(t *testing.T) {
		// Synchronous mode ("on") but the member is not in the leader's
		// sync-standby list -> lagging.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("on"))

		isLagging, lag := manager.IsMemberLagging(ctx, cluster, currentMember)
		assert.True(t, isLagging)
		assert.Equal(t, int64(1), lag)
	})

	t.Run("get timeline failed", func(t *testing.T) {
		manager.SetIsLeader(true)
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("off"))
		mock.ExpectQuery("SELECT timeline_id").
			WillReturnError(fmt.Errorf("some error"))

		isLagging, lag := manager.IsMemberLagging(ctx, cluster, currentMember)
		assert.True(t, isLagging)
		assert.Equal(t, int64(1), lag)
	})

	t.Run("timeline not match", func(t *testing.T) {
		// Local timeline 2 vs leader timeline 1 -> lagging.
		manager.SetIsLeader(true)
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("off"))
		mock.ExpectQuery("SELECT timeline_id").
			WillReturnRows(pgxmock.NewRows([]string{"timeline_id"}).AddRow(2))
		isLagging, lag := manager.IsMemberLagging(ctx, cluster, currentMember)
		assert.True(t, isLagging)
		assert.Equal(t, int64(1), lag)
	})

	t.Run("get wal position failed", func(t *testing.T) {
		manager.SetIsLeader(true)
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("off"))
		mock.ExpectQuery("SELECT timeline_id").
			WillReturnRows(pgxmock.NewRows([]string{"timeline_id"}).AddRow(1))
		mock.ExpectQuery("pg_catalog.pg_current_wal_lsn()").
			WillReturnError(fmt.Errorf("some error"))

		isLagging, lag := manager.IsMemberLagging(ctx, cluster, currentMember)
		assert.True(t, isLagging)
		assert.Equal(t, int64(1), lag)
	})

	t.Run("current member is not lagging", func(t *testing.T) {
		// Timeline matches and WAL position (100) equals the leader's
		// OpTimestamp -> zero lag.
		manager.SetIsLeader(true)
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("off"))
		mock.ExpectQuery("SELECT timeline_id").
			WillReturnRows(pgxmock.NewRows([]string{"timeline_id"}).AddRow(1))
		mock.ExpectQuery("pg_catalog.pg_current_wal_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(100))

		isLagging, lag := manager.IsMemberLagging(ctx, cluster, currentMember)
		assert.False(t, isLagging)
		assert.Equal(t, int64(0), lag)
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   692  
   693  func TestGetCurrentTimeLine(t *testing.T) {
   694  	ctx := context.TODO()
   695  	manager, mock, _ := MockDatabase(t)
   696  	defer mock.Close()
   697  
   698  	t.Run("query failed", func(t *testing.T) {
   699  		mock.ExpectQuery("SELECT timeline_id").
   700  			WillReturnError(fmt.Errorf("some error"))
   701  
   702  		timeline := manager.getCurrentTimeLine(ctx, "")
   703  		assert.Equal(t, int64(0), timeline)
   704  	})
   705  
   706  	t.Run("parse query failed", func(t *testing.T) {
   707  		mock.ExpectQuery("SELECT timeline_id").
   708  			WillReturnRows(pgxmock.NewRows([]string{"timeline_id"}))
   709  
   710  		timeline := manager.getCurrentTimeLine(ctx, "")
   711  		assert.Equal(t, int64(0), timeline)
   712  	})
   713  
   714  	t.Run("get current timeline success", func(t *testing.T) {
   715  		mock.ExpectQuery("SELECT timeline_id").
   716  			WillReturnRows(pgxmock.NewRows([]string{"timeline_id"}).AddRow(1))
   717  
   718  		timeline := manager.getCurrentTimeLine(ctx, "")
   719  		assert.Equal(t, int64(1), timeline)
   720  	})
   721  
   722  	if err := mock.ExpectationsWereMet(); err != nil {
   723  		t.Errorf("there were unfulfilled expectations: %v", err)
   724  	}
   725  }
   726  
   727  func TestGetTimeLineWithHost(t *testing.T) {
   728  	ctx := context.TODO()
   729  	manager, mock, _ := MockDatabase(t)
   730  	defer mock.Close()
   731  
   732  	t.Run("check is leader failed", func(t *testing.T) {
   733  		timeLine := manager.getTimeLineWithHost(ctx, "test")
   734  		assert.Zero(t, timeLine)
   735  	})
   736  
   737  	t.Run("timeLine has been set", func(t *testing.T) {
   738  		manager.DBState = &dcs.DBState{
   739  			Extra: map[string]string{
   740  				postgres.TimeLine: "1",
   741  			},
   742  		}
   743  
   744  		timeLine := manager.getTimeLineWithHost(ctx, "")
   745  		assert.Equal(t, int64(1), timeLine)
   746  	})
   747  }
   748  
// TestGetLocalTimeLineAndLsn exercises getLocalTimeLineAndLsn with and
// without a running database process.
func TestGetLocalTimeLineAndLsn(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()

	t.Run("db is not running", func(t *testing.T) {
		// manager.Proc is nil here, so the DB is considered not running
		// and all zero values are returned.
		isRecovery, localTimeLine, localLsn := manager.getLocalTimeLineAndLsn(ctx)
		assert.False(t, isRecovery)
		assert.Equal(t, int64(0), localTimeLine)
		assert.Equal(t, int64(0), localLsn)
	})

	manager.Proc = &process.Process{
		// Process 1 is always in a running state.
		Pid: 1,
	}

	t.Run("get local timeline and lsn success", func(t *testing.T) {
		// received_tli then replay LSN are queried for a standby in recovery.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"received_tli"}).AddRow(1))
		mock.ExpectQuery("pg_last_wal_replay_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454272))

		isRecovery, localTimeLine, localLsn := manager.getLocalTimeLineAndLsn(ctx)
		assert.True(t, isRecovery)
		assert.Equal(t, int64(1), localTimeLine)
		assert.Equal(t, int64(23454272), localLsn)
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   782  
   783  func TestCleanDBState(t *testing.T) {
   784  	manager, mock, _ := MockDatabase(t)
   785  	defer mock.Close()
   786  
   787  	t.Run("clean db state", func(t *testing.T) {
   788  		manager.cleanDBState()
   789  		isSet, isLeader := manager.GetIsLeader()
   790  		assert.False(t, isSet)
   791  		assert.False(t, isLeader)
   792  		assert.Nil(t, manager.recoveryParams)
   793  		assert.Nil(t, manager.syncStandbys)
   794  		assert.Equal(t, &dcs.DBState{
   795  			Extra: map[string]string{},
   796  		}, manager.DBState)
   797  	})
   798  }
   799  
// TestGetDBState walks GetDBState through each of its failure points in order
// (leader check, replication mode, WAL position, timeline, recovery params,
// pg_controldata) and finishes with a fully successful run. The pgxmock
// expectations in every subtest are strictly ordered and must match the
// sequence of queries GetDBState issues, so statement order here matters.
func TestGetDBState(t *testing.T) {
	ctx := context.TODO()
	manager, mock, _ := MockDatabase(t)
	defer mock.Close()
	// Restore the real commander after the fake used for pg_controldata tests.
	defer func() {
		postgres.LocalCommander = postgres.NewExecCommander
	}()
	cluster := &dcs.Cluster{}

	t.Run("check is leader failed", func(t *testing.T) {
		// First query (pg_is_in_recovery) errors, so GetDBState bails out early.
		mock.ExpectQuery("select").
			WillReturnError(fmt.Errorf("some error"))

		dbState := manager.GetDBState(ctx, cluster)
		assert.Nil(t, dbState)
	})

	t.Run("get replication mode failed", func(t *testing.T) {
		// Leader check succeeds (not in recovery), then the
		// synchronous_commit setting query errors.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(false))
		mock.ExpectQuery("select").
			WillReturnError(fmt.Errorf("some error"))

		dbState := manager.GetDBState(ctx, cluster)
		assert.Nil(t, dbState)
	})

	t.Run("synchronous mode but get wal position failed", func(t *testing.T) {
		// Make the current member the DCS leader so the synchronous-standby
		// parsing path is taken before the WAL position query fails.
		cluster.Leader = &dcs.Leader{
			Name: manager.CurrentMemberName,
		}
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(false))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("on"))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow(`ANY 4("a",*,b)`))
		mock.ExpectQuery("pg_catalog.pg_current_wal_lsn()").
			WillReturnError(fmt.Errorf("some error"))

		dbState := manager.GetDBState(ctx, cluster)
		assert.Nil(t, dbState)
	})

	t.Run("get timeline failed", func(t *testing.T) {
		// Primary path (not in recovery, async): WAL lsn succeeds, then the
		// timeline query against pg_control_checkpoint errors.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(false))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("off"))
		mock.ExpectQuery("pg_catalog.pg_current_wal_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454272))
		mock.ExpectQuery("SELECT timeline_id").
			WillReturnError(fmt.Errorf("some error"))

		dbState := manager.GetDBState(ctx, cluster)
		assert.Nil(t, dbState)
	})

	t.Run("read recovery params failed", func(t *testing.T) {
		// Standby path (in recovery): replay/receive lsn and received_tli
		// succeed, then the pg_settings query for recovery params errors.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(true))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("off"))
		mock.ExpectQuery("pg_last_wal_replay_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454272))
		mock.ExpectQuery("pg_catalog.pg_last_wal_receive_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454273))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"received_tli"}).AddRow(1))
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnError(fmt.Errorf("some error"))

		dbState := manager.GetDBState(ctx, cluster)
		assert.Nil(t, dbState)
	})

	t.Run("get pg control data failed", func(t *testing.T) {
		// All queries succeed, but the fake pg_controldata command fails.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(true))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("off"))
		mock.ExpectQuery("pg_last_wal_replay_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454272))
		mock.ExpectQuery("pg_catalog.pg_last_wal_receive_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454273))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"received_tli"}).AddRow(1))
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnRows(pgxmock.NewRows([]string{"name", "setting", "context"}).
				AddRow("primary_conninfo", "host=maple72-postgresql-0.maple72-postgresql-headless port=5432 application_name=my-application", "postmaster"))
		postgres.LocalCommander = postgres.NewFakeCommander(func() error {
			return fmt.Errorf("some error")
		}, nil, nil)

		dbState := manager.GetDBState(ctx, cluster)
		assert.Nil(t, dbState)
	})

	t.Run("get db state success", func(t *testing.T) {
		// Full standby happy path: every query and the fake pg_controldata
		// command succeed; verify the state cached on the manager afterwards.
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"pg_is_in_recovery"}).AddRow(true))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"current_setting"}).AddRow("off"))
		mock.ExpectQuery("pg_last_wal_replay_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454272))
		mock.ExpectQuery("pg_catalog.pg_last_wal_receive_lsn()").
			WillReturnRows(pgxmock.NewRows([]string{"pg_wal_lsn_diff"}).AddRow(23454273))
		mock.ExpectQuery("select").
			WillReturnRows(pgxmock.NewRows([]string{"received_tli"}).AddRow(1))
		mock.ExpectQuery("pg_catalog.pg_settings").
			WillReturnRows(pgxmock.NewRows([]string{"name", "setting", "context"}).
				AddRow("primary_conninfo", "host=maple72-postgresql-0.maple72-postgresql-headless port=5432 application_name=my-application", "postmaster"))
		// Two-line fake pg_controldata output parsed into key/value pairs.
		fakeControlData := "WAL block size:                       8192\n" +
			"Database cluster state:               shut down"

		var stdout = bytes.NewBuffer([]byte(fakeControlData))
		postgres.LocalCommander = postgres.NewFakeCommander(func() error {
			return nil
		}, stdout, nil)

		dbState := manager.GetDBState(ctx, cluster)
		isSet, isLeader := manager.GetIsLeader()
		assert.NotNil(t, dbState)
		assert.True(t, isSet)
		assert.False(t, isLeader)
		assert.Equal(t, postgres.Asynchronous, dbState.Extra[postgres.ReplicationMode])
		// OpTimestamp reflects the receive lsn (23454273), not the replay lsn.
		assert.Equal(t, int64(23454273), dbState.OpTimestamp)
		assert.Equal(t, "1", dbState.Extra[postgres.TimeLine])
		assert.Equal(t, "maple72-postgresql-0.maple72-postgresql-headless", manager.recoveryParams[postgres.PrimaryConnInfo]["host"])
		assert.Equal(t, "postmaster", manager.recoveryParams[postgres.PrimaryConnInfo]["context"])
		assert.Equal(t, "shut down", manager.pgControlData["Database cluster state"])
		assert.Equal(t, "8192", manager.pgControlData["WAL block size"])
	})

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expectations: %v", err)
	}
}
   938  
   939  func TestFollow(t *testing.T) {
   940  	ctx := context.TODO()
   941  	manager, mock, _ := MockDatabase(t)
   942  	defer mock.Close()
   943  	cluster := &dcs.Cluster{
   944  		Leader: &dcs.Leader{
   945  			Name: manager.CurrentMemberName,
   946  		},
   947  	}
   948  	fs = afero.NewMemMapFs()
   949  
   950  	t.Run("cluster has no leader now", func(t *testing.T) {
   951  		err := manager.follow(ctx, false, cluster)
   952  		assert.Nil(t, err)
   953  	})
   954  
   955  	cluster.Members = append(cluster.Members, dcs.Member{
   956  		Name: manager.CurrentMemberName,
   957  	})
   958  
   959  	t.Run("current member is leader", func(t *testing.T) {
   960  		err := manager.follow(ctx, false, cluster)
   961  		assert.Nil(t, err)
   962  	})
   963  
   964  	manager.CurrentMemberName = "test"
   965  
   966  	t.Run("open postgresql conf failed", func(t *testing.T) {
   967  		err := manager.follow(ctx, true, cluster)
   968  		assert.NotNil(t, err)
   969  	})
   970  
   971  	t.Run("open postgresql conf failed", func(t *testing.T) {
   972  		err := manager.follow(ctx, true, cluster)
   973  		assert.NotNil(t, err)
   974  	})
   975  
   976  	t.Run("follow without restart", func(t *testing.T) {
   977  		_, _ = fs.Create("/kubeblocks/conf/postgresql.conf")
   978  		mock.ExpectExec("select pg_reload_conf()").
   979  			WillReturnResult(pgxmock.NewResult("select", 1))
   980  
   981  		err := manager.follow(ctx, false, cluster)
   982  		assert.Nil(t, err)
   983  	})
   984  
   985  	if err := mock.ExpectationsWereMet(); err != nil {
   986  		t.Errorf("there were unfulfilled expectations: %v", err)
   987  	}
   988  }
   989  
   990  func TestHasOtherHealthyMembers(t *testing.T) {
   991  	ctx := context.TODO()
   992  	manager, mock, _ := MockDatabase(t)
   993  	defer mock.Close()
   994  	cluster := &dcs.Cluster{}
   995  	cluster.Members = append(cluster.Members, dcs.Member{
   996  		Name: manager.CurrentMemberName,
   997  	})
   998  
   999  	t.Run("", func(t *testing.T) {
  1000  		members := manager.HasOtherHealthyMembers(ctx, cluster, manager.CurrentMemberName)
  1001  		assert.Equal(t, 0, len(members))
  1002  	})
  1003  
  1004  	if err := mock.ExpectationsWereMet(); err != nil {
  1005  		t.Errorf("there were unfulfilled expectations: %v", err)
  1006  	}
  1007  }
  1008  
  1009  func TestGetPgControlData(t *testing.T) {
  1010  	manager, mock, _ := MockDatabase(t)
  1011  	defer mock.Close()
  1012  	defer func() {
  1013  		postgres.LocalCommander = postgres.NewExecCommander
  1014  	}()
  1015  
  1016  	t.Run("get pg control data failed", func(t *testing.T) {
  1017  		postgres.LocalCommander = postgres.NewFakeCommander(func() error {
  1018  			return fmt.Errorf("some error")
  1019  		}, nil, nil)
  1020  
  1021  		data := manager.getPgControlData()
  1022  		assert.Nil(t, data)
  1023  	})
  1024  
  1025  	t.Run("get pg control data success", func(t *testing.T) {
  1026  		fakeControlData := "pg_control version number:            1002\n" +
  1027  			"Data page checksum version:           0"
  1028  
  1029  		var stdout = bytes.NewBuffer([]byte(fakeControlData))
  1030  		postgres.LocalCommander = postgres.NewFakeCommander(func() error {
  1031  			return nil
  1032  		}, stdout, nil)
  1033  
  1034  		data := manager.getPgControlData()
  1035  		assert.NotNil(t, data)
  1036  		assert.Equal(t, "1002", data["pg_control version number"])
  1037  		assert.Equal(t, "0", data["Data page checksum version"])
  1038  	})
  1039  
  1040  	t.Run("pg control data has been set", func(t *testing.T) {
  1041  		manager.pgControlData = map[string]string{
  1042  			"Data page checksum version": "1",
  1043  		}
  1044  
  1045  		data := manager.getPgControlData()
  1046  		assert.NotNil(t, data)
  1047  		assert.Equal(t, "1", data["Data page checksum version"])
  1048  	})
  1049  }