code.vegaprotocol.io/vega@v0.79.0/datanode/sqlstore/sqlstore.go (about)

     1  // Copyright (C) 2023 Gobalsky Labs Limited
     2  //
     3  // This program is free software: you can redistribute it and/or modify
     4  // it under the terms of the GNU Affero General Public License as
     5  // published by the Free Software Foundation, either version 3 of the
     6  // License, or (at your option) any later version.
     7  //
     8  // This program is distributed in the hope that it will be useful,
     9  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    10  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    11  // GNU Affero General Public License for more details.
    12  //
    13  // You should have received a copy of the GNU Affero General Public License
    14  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    15  
    16  package sqlstore
    17  
    18  import (
    19  	"context"
    20  	"database/sql"
    21  	"embed"
    22  	"fmt"
    23  	"io"
    24  	"io/fs"
    25  	"time"
    26  
    27  	"code.vegaprotocol.io/vega/datanode/entities"
    28  	"code.vegaprotocol.io/vega/logging"
    29  	"code.vegaprotocol.io/vega/paths"
    30  
    31  	embeddedpostgres "github.com/fergusstrange/embedded-postgres"
    32  	"github.com/jackc/pgx/v4"
    33  	"github.com/jackc/pgx/v4/pgxpool"
    34  	"github.com/jackc/pgx/v4/stdlib"
    35  	"github.com/pkg/errors"
    36  	"github.com/pressly/goose/v3"
    37  	"github.com/shopspring/decimal"
    38  	"go.uber.org/zap"
    39  )
    40  
// ErrBadID is returned when a caller-supplied identifier is not a valid hex
// string and so cannot be decoded into an entity ID.
var ErrBadID = errors.New("bad id (must be hex string)")

// EmbedMigrations holds the SQL migration files compiled into the binary so
// goose can run them without needing files on disk.
//
//go:embed migrations/*.sql
var EmbedMigrations embed.FS

const (
	// SQLMigrationsDir is the directory inside EmbedMigrations containing the goose migration files.
	SQLMigrationsDir = "migrations"
	// InfiniteInterval is the sentinel retention period meaning "keep data forever" (no retention policy is installed).
	InfiniteInterval = "forever"
	// blocksEntity is the hypertable whose retention must be at least as long as every other entity's,
	// because block rows are referenced by all other tables (see ApplyDataRetentionPolicies).
	blocksEntity     = "blocks"
)
    51  
    52  var defaultRetentionPolicies = map[RetentionPeriod][]RetentionPolicy{
    53  	RetentionPeriodStandard: {
    54  		{HypertableOrCaggName: "balances", DataRetentionPeriod: "7 days"},
    55  		{HypertableOrCaggName: "checkpoints", DataRetentionPeriod: "7 days"},
    56  		{HypertableOrCaggName: "conflated_balances", DataRetentionPeriod: "1 year"},
    57  		{HypertableOrCaggName: "delegations", DataRetentionPeriod: "7 days"},
    58  		{HypertableOrCaggName: "ledger", DataRetentionPeriod: "6 months"},
    59  		{HypertableOrCaggName: "orders", DataRetentionPeriod: "1 month"},
    60  		{HypertableOrCaggName: "trades", DataRetentionPeriod: "1 year"},
    61  		{HypertableOrCaggName: "trades_candle_1_minute", DataRetentionPeriod: "1 month"},
    62  		{HypertableOrCaggName: "trades_candle_5_minutes", DataRetentionPeriod: "1 month"},
    63  		{HypertableOrCaggName: "trades_candle_15_minutes", DataRetentionPeriod: "1 month"},
    64  		{HypertableOrCaggName: "trades_candle_30_minutes", DataRetentionPeriod: "2 months"},
    65  		{HypertableOrCaggName: "trades_candle_1_hour", DataRetentionPeriod: "1 year"},
    66  		{HypertableOrCaggName: "trades_candle_4_hours", DataRetentionPeriod: "1 year"},
    67  		{HypertableOrCaggName: "trades_candle_6_hours", DataRetentionPeriod: "1 year"},
    68  		{HypertableOrCaggName: "trades_candle_8_hours", DataRetentionPeriod: "1 year"},
    69  		{HypertableOrCaggName: "trades_candle_12_hours", DataRetentionPeriod: "1 year"},
    70  		{HypertableOrCaggName: "trades_candle_1_day", DataRetentionPeriod: "1 year"},
    71  		{HypertableOrCaggName: "trades_candle_7_days", DataRetentionPeriod: "10 years"},
    72  		{HypertableOrCaggName: "market_data", DataRetentionPeriod: "7 days"},
    73  		{HypertableOrCaggName: "margin_levels", DataRetentionPeriod: "7 days"},
    74  		{HypertableOrCaggName: "conflated_margin_levels", DataRetentionPeriod: "1 year"},
    75  		{HypertableOrCaggName: "positions", DataRetentionPeriod: "7 days"},
    76  		{HypertableOrCaggName: "conflated_positions", DataRetentionPeriod: "1 year"},
    77  		{HypertableOrCaggName: "liquidity_provisions", DataRetentionPeriod: "1 day"},
    78  		{HypertableOrCaggName: "markets", DataRetentionPeriod: "1 year"},
    79  		{HypertableOrCaggName: "deposits", DataRetentionPeriod: "1 year"},
    80  		{HypertableOrCaggName: "withdrawals", DataRetentionPeriod: "1 year"},
    81  		{HypertableOrCaggName: "blocks", DataRetentionPeriod: "1 year"},
    82  		{HypertableOrCaggName: "rewards", DataRetentionPeriod: "1 year"},
    83  		{HypertableOrCaggName: "stop_orders", DataRetentionPeriod: "1 month"},
    84  		{HypertableOrCaggName: "funding_period_data_points", DataRetentionPeriod: "1 year"},
    85  		{HypertableOrCaggName: "party_activity_streaks", DataRetentionPeriod: "1 year"},
    86  		{HypertableOrCaggName: "referral_programs", DataRetentionPeriod: "1 year"},
    87  		{HypertableOrCaggName: "referral_set_stats", DataRetentionPeriod: "1 year"},
    88  		{HypertableOrCaggName: "oracle_data", DataRetentionPeriod: "1 year"},
    89  		{HypertableOrCaggName: "oracle_data_oracle_specs", DataRetentionPeriod: "1 year"},
    90  		{HypertableOrCaggName: "vesting_stats", DataRetentionPeriod: "1 year"},
    91  		{HypertableOrCaggName: "volume_discount_stats", DataRetentionPeriod: "1 year"},
    92  		{HypertableOrCaggName: "referral_set_stats", DataRetentionPeriod: "1 year"},
    93  		{HypertableOrCaggName: "fees_stats", DataRetentionPeriod: "1 year"},
    94  		{HypertableOrCaggName: "funding_payment", DataRetentionPeriod: "1 year"},
    95  		{HypertableOrCaggName: "volume_discount_programs", DataRetentionPeriod: "1 year"},
    96  		{HypertableOrCaggName: "party_locked_balances", DataRetentionPeriod: "1 year"},
    97  		{HypertableOrCaggName: "party_vesting_balances", DataRetentionPeriod: "1 year"},
    98  		{HypertableOrCaggName: "party_vesting_stats", DataRetentionPeriod: "1 year"},
    99  		{HypertableOrCaggName: "fees_stats_by_party", DataRetentionPeriod: "1 year"},
   100  		{HypertableOrCaggName: "paid_liquidity_fees", DataRetentionPeriod: "1 year"},
   101  		{HypertableOrCaggName: "transfer_fees_discount", DataRetentionPeriod: "1 year"},
   102  		{HypertableOrCaggName: "time_weighted_notional_positions", DataRetentionPeriod: "1 month"},
   103  		{HypertableOrCaggName: "game_team_scores", DataRetentionPeriod: "1 month"},
   104  		{HypertableOrCaggName: "game_party_scores", DataRetentionPeriod: "1 month"},
   105  		{HypertableOrCaggName: "volume_rebate_programs", DataRetentionPeriod: "1 year"},
   106  		{HypertableOrCaggName: "volume_rebate_stats", DataRetentionPeriod: "1 year"},
   107  	},
   108  	RetentionPeriodArchive: {
   109  		{HypertableOrCaggName: "*", DataRetentionPeriod: string(RetentionPeriodArchive)},
   110  	},
   111  	RetentionPeriodLite: {
   112  		{HypertableOrCaggName: "*", DataRetentionPeriod: string(RetentionPeriodLite)},
   113  	},
   114  }
   115  
   116  func MigrateToLatestSchema(log *logging.Logger, config Config) error {
   117  	log = log.Named("db-migrate")
   118  	goose.SetBaseFS(EmbedMigrations)
   119  	goose.SetLogger(log.GooseLogger())
   120  	goose.SetVerbose(bool(config.VerboseMigration))
   121  
   122  	poolConfig, err := config.ConnectionConfig.GetPoolConfig()
   123  	if err != nil {
   124  		return fmt.Errorf("failed to get pool config:%w", err)
   125  	}
   126  
   127  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
   128  	defer db.Close()
   129  
   130  	log.Info("Checking database version and migrating sql schema to latest version, please wait...")
   131  	if err = goose.Up(db, SQLMigrationsDir); err != nil {
   132  		return fmt.Errorf("error migrating sql schema: %w", err)
   133  	}
   134  	log.Info("Sql schema migration completed successfully")
   135  
   136  	return nil
   137  }
   138  
   139  func MigrateUpToSchemaVersion(log *logging.Logger, config Config, version int64, fs fs.FS) error {
   140  	goose.SetBaseFS(fs)
   141  	goose.SetLogger(log.Named("db migration").GooseLogger())
   142  	goose.SetVerbose(bool(config.VerboseMigration))
   143  	goose.SetVerbose(true)
   144  
   145  	poolConfig, err := config.ConnectionConfig.GetPoolConfig()
   146  	if err != nil {
   147  		return fmt.Errorf("failed to get pool config:%w", err)
   148  	}
   149  
   150  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
   151  	defer db.Close()
   152  
   153  	log.Infof("Checking database version and migrating sql schema to version %d, please wait...", version)
   154  	if err = goose.UpTo(db, SQLMigrationsDir, version); err != nil {
   155  		return fmt.Errorf("error migrating sql schema: %w", err)
   156  	}
   157  	log.Info("Sql schema migration completed successfully")
   158  
   159  	return nil
   160  }
   161  
   162  func MigrateDownToSchemaVersion(log *logging.Logger, config Config, version int64, fs fs.FS) error {
   163  	goose.SetBaseFS(fs)
   164  	goose.SetLogger(log.Named("db migration").GooseLogger())
   165  	goose.SetVerbose(bool(config.VerboseMigration))
   166  	goose.SetVerbose(true)
   167  
   168  	poolConfig, err := config.ConnectionConfig.GetPoolConfig()
   169  	if err != nil {
   170  		return fmt.Errorf("failed to get pool config:%w", err)
   171  	}
   172  
   173  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
   174  	defer db.Close()
   175  
   176  	log.Infof("Checking database version and migrating sql schema to version %d, please wait...", version)
   177  	if err = goose.DownTo(db, SQLMigrationsDir, version); err != nil {
   178  		return fmt.Errorf("error migrating sql schema: %w", err)
   179  	}
   180  	log.Info("Sql schema migration completed successfully")
   181  
   182  	return nil
   183  }
   184  
   185  func RevertToSchemaVersionZero(log *logging.Logger, config ConnectionConfig, fs fs.FS, verbose bool) error {
   186  	log = log.Named("revert-schema-to-version-0")
   187  	goose.SetBaseFS(fs)
   188  	goose.SetLogger(log.GooseLogger())
   189  	goose.SetVerbose(verbose)
   190  
   191  	poolConfig, err := config.GetPoolConfig()
   192  	if err != nil {
   193  		return fmt.Errorf("failed to get pool config:%w", err)
   194  	}
   195  
   196  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
   197  	defer db.Close()
   198  
   199  	log.Info("Checking database version and reverting sql schema to version 0, please wait...")
   200  	if err := goose.DownTo(db, SQLMigrationsDir, 0); err != nil {
   201  		return fmt.Errorf("failed to goose down the schema to version 0: %w", err)
   202  	}
   203  	log.Info("Sql schema migration completed successfully")
   204  
   205  	return nil
   206  }
   207  
   208  // CheckSchemaVersionsSynced checks if if the current migrated version of the DB matches the latest available version.
   209  // If they are not in sync then we are running code expecting a particular version, but the rows/columns we ask for
   210  // might not exist.
   211  func CheckSchemaVersionsSynced(log *logging.Logger, config ConnectionConfig, fs fs.FS) error {
   212  	goose.SetBaseFS(fs)
   213  	goose.SetLogger(log.GooseLogger())
   214  	goose.SetVerbose(true)
   215  
   216  	log.Info("Checking database version matches code version")
   217  	poolConfig, err := config.GetPoolConfig()
   218  	if err != nil {
   219  		return fmt.Errorf("failed to get pool config: %w", err)
   220  	}
   221  
   222  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
   223  	defer db.Close()
   224  
   225  	current, err := goose.GetDBVersion(db)
   226  	if err != nil {
   227  		return err
   228  	}
   229  
   230  	migrations, err := goose.CollectMigrations(SQLMigrationsDir, current, current+1)
   231  	if err != nil {
   232  		return err
   233  	}
   234  
   235  	if migrations.Len() != 0 {
   236  		last, err := migrations.Last()
   237  		if err != nil {
   238  			return err
   239  		}
   240  		return fmt.Errorf("Schema is version %d, latest should be %d", current, last.Version)
   241  	}
   242  
   243  	return nil
   244  }
   245  
   246  func WipeDatabaseAndMigrateSchemaToVersion(log *logging.Logger, config ConnectionConfig, version int64, fs fs.FS, verbose bool) error {
   247  	log = log.Named("db-wipe-migrate")
   248  	goose.SetBaseFS(fs)
   249  	goose.SetLogger(log.GooseLogger())
   250  	goose.SetVerbose(verbose)
   251  
   252  	poolConfig, err := config.GetPoolConfig()
   253  	if err != nil {
   254  		return fmt.Errorf("failed to get pool config:%w", err)
   255  	}
   256  
   257  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
   258  	defer db.Close()
   259  
   260  	currentVersion, err := goose.GetDBVersion(db)
   261  	if err != nil {
   262  		return err
   263  	}
   264  
   265  	log.Infof("Wiping database and migrating schema to version %d", version)
   266  	if currentVersion > 0 {
   267  		if err := goose.DownTo(db, SQLMigrationsDir, 0); err != nil {
   268  			return fmt.Errorf("failed to goose down the schema: %w", err)
   269  		}
   270  	}
   271  
   272  	if version > 0 {
   273  		if err := goose.UpTo(db, SQLMigrationsDir, version); err != nil {
   274  			return fmt.Errorf("failed to goose up the schema: %w", err)
   275  		}
   276  	}
   277  	log.Info("Sql schema migration completed successfully")
   278  
   279  	return nil
   280  }
   281  
   282  func WipeDatabaseAndMigrateSchemaToLatestVersion(log *logging.Logger, config ConnectionConfig, fs fs.FS, verbose bool) error {
   283  	log = log.Named("db-wipe-migrate")
   284  	goose.SetBaseFS(fs)
   285  	goose.SetLogger(log.GooseLogger())
   286  	goose.SetVerbose(verbose)
   287  
   288  	poolConfig, err := config.GetPoolConfig()
   289  	if err != nil {
   290  		return fmt.Errorf("failed to get pool config:%w", err)
   291  	}
   292  
   293  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
   294  	defer db.Close()
   295  
   296  	currentVersion, err := goose.GetDBVersion(db)
   297  	if err != nil {
   298  		return err
   299  	}
   300  
   301  	log.Info("Wiping database and migrating schema to latest version")
   302  	if currentVersion > 0 {
   303  		if err := goose.DownTo(db, SQLMigrationsDir, 0); err != nil {
   304  			return fmt.Errorf("failed to goose down the schema: %w", err)
   305  		}
   306  	}
   307  
   308  	if err := goose.Up(db, SQLMigrationsDir); err != nil {
   309  		return fmt.Errorf("failed to goose up the schema: %w", err)
   310  	}
   311  	log.Info("Sql schema migration completed successfully")
   312  
   313  	return nil
   314  }
   315  
   316  func HasVegaSchema(ctx context.Context, conn Connection) (bool, error) {
   317  	tableNames, err := GetAllTableNames(ctx, conn)
   318  	if err != nil {
   319  		return false, fmt.Errorf("failed to get all table names:%w", err)
   320  	}
   321  
   322  	return len(tableNames) != 0, nil
   323  }
   324  
   325  func GetAllTableNames(ctx context.Context, conn Connection) ([]string, error) {
   326  	tableNameRows, err := conn.Query(ctx, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' and table_type = 'BASE TABLE' and table_name != 'goose_db_version' order by table_name")
   327  	if err != nil {
   328  		return nil, fmt.Errorf("failed to query table names:%w", err)
   329  	}
   330  
   331  	var tableNames []string
   332  	for tableNameRows.Next() {
   333  		tableName := ""
   334  		err = tableNameRows.Scan(&tableName)
   335  		if err != nil {
   336  			return nil, fmt.Errorf("failed to scan table Name:%w", err)
   337  		}
   338  		tableNames = append(tableNames, tableName)
   339  	}
   340  	return tableNames, nil
   341  }
   342  
   343  func RecreateVegaDatabase(ctx context.Context, log *logging.Logger, connConfig ConnectionConfig) error {
   344  	postgresDbConn, err := pgx.Connect(context.Background(), connConfig.GetConnectionStringForPostgresDatabase())
   345  	if err != nil {
   346  		return fmt.Errorf("unable to connect to database:%w", err)
   347  	}
   348  
   349  	defer func() {
   350  		err := postgresDbConn.Close(ctx)
   351  		if err != nil {
   352  			log.Errorf("error closing database connection:%v", err)
   353  		}
   354  	}()
   355  
   356  	err = dropDatabaseWithRetry(ctx, postgresDbConn, connConfig)
   357  	if err != nil {
   358  		return fmt.Errorf("failed to drop database:%w", err)
   359  	}
   360  
   361  	_, err = postgresDbConn.Exec(ctx, fmt.Sprintf("CREATE DATABASE %s TEMPLATE template0 LC_COLLATE 'C' LC_CTYPE 'C';", connConfig.Database))
   362  	if err != nil {
   363  		return fmt.Errorf("unable to create database:%w", err)
   364  	}
   365  	return nil
   366  }
   367  
// DatanodeBlockSpan describes the range of block heights for which a data
// node currently holds history.
type DatanodeBlockSpan struct {
	// FromHeight is the height of the oldest block held.
	FromHeight int64
	// ToHeight is the height of the most recent block held.
	ToHeight   int64
	// HasData is false when the database contains no block data at all.
	HasData    bool
}
   373  
   374  func GetDatanodeBlockSpan(ctx context.Context, connPool *pgxpool.Pool) (DatanodeBlockSpan, error) {
   375  	hasVegaSchema, err := HasVegaSchema(ctx, connPool)
   376  	if err != nil {
   377  		return DatanodeBlockSpan{}, fmt.Errorf("failed to get check is database if empty:%w", err)
   378  	}
   379  
   380  	var span DatanodeBlockSpan
   381  	if hasVegaSchema {
   382  		oldestBlock, err := GetOldestHistoryBlockUsingConnection(ctx, connPool)
   383  		if err != nil {
   384  			if errors.Is(err, entities.ErrNotFound) {
   385  				return DatanodeBlockSpan{
   386  					HasData: false,
   387  				}, nil
   388  			}
   389  			return DatanodeBlockSpan{}, fmt.Errorf("failed to get oldest history block:%w", err)
   390  		}
   391  
   392  		lastBlock, err := GetLastBlockUsingConnection(ctx, connPool)
   393  		if err != nil {
   394  			return DatanodeBlockSpan{}, fmt.Errorf("failed to get last block:%w", err)
   395  		}
   396  
   397  		span = DatanodeBlockSpan{
   398  			FromHeight: oldestBlock.Height,
   399  			ToHeight:   lastBlock.Height,
   400  			HasData:    true,
   401  		}
   402  	}
   403  
   404  	return span, nil
   405  }
   406  
   407  func dropDatabaseWithRetry(parentCtx context.Context, postgresDbConn *pgx.Conn, connConfig ConnectionConfig) error {
   408  	var err error
   409  	for i := 0; i < 5; i++ {
   410  		ctx, cancelFn := context.WithTimeout(parentCtx, 20*time.Second)
   411  		_, err = postgresDbConn.Exec(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s WITH ( FORCE )", connConfig.Database))
   412  		cancelFn()
   413  		if err == nil {
   414  			break
   415  		}
   416  		time.Sleep(5 * time.Second)
   417  	}
   418  	if err != nil {
   419  		return fmt.Errorf("unable to drop existing database:%w", err)
   420  	}
   421  	return nil
   422  }
   423  
// oneDayAsSeconds is the minimum permitted retention period, in seconds;
// shorter periods could remove data still needed for network-history snapshots.
const oneDayAsSeconds = 60 * 60 * 24
   425  
   426  func getRetentionEntities(db *sql.DB) ([]string, error) {
   427  	rows, err := db.Query(`
   428  select view_name as table_name
   429  from timescaledb_information.continuous_aggregates WHERE hypertable_schema='public'
   430  union all
   431  select hypertable_name
   432  from timescaledb_information.hypertables WHERE hypertable_schema='public';
   433  `)
   434  	if err != nil {
   435  		return nil, err
   436  	}
   437  
   438  	retentionEntities := make([]string, 0)
   439  	defer rows.Close()
   440  
   441  	for rows.Next() {
   442  		var entity string
   443  		err = rows.Scan(&entity)
   444  		if err != nil {
   445  			return nil, err
   446  		}
   447  		retentionEntities = append(retentionEntities, entity)
   448  	}
   449  
   450  	return retentionEntities, nil
   451  }
   452  
   453  func getPolicy[T HypertableOverride](entity string, policies []T) (T, bool) {
   454  	var defaultPolicy T
   455  	for _, override := range policies {
   456  		if override.EntityName() == entity {
   457  			return override, true
   458  		}
   459  	}
   460  	return defaultPolicy, false
   461  }
   462  
   463  func setRetentionPolicy(db *sql.DB, entity string, policy string, log *logging.Logger) error {
   464  	if policy == "" {
   465  		return nil
   466  	}
   467  	if _, err := db.Exec(fmt.Sprintf("SELECT remove_retention_policy('%s', true);", entity)); err != nil {
   468  		return fmt.Errorf("removing retention policy from %s: %w", entity, err)
   469  	}
   470  
   471  	log.Info("Setting retention policy", zap.String("entity", entity), zap.String("policy", policy))
   472  	// If we're keeping data forever, don't bother adding a policy at all
   473  	if policy == InfiniteInterval {
   474  		return nil
   475  	}
   476  
   477  	if _, err := db.Exec(fmt.Sprintf("SELECT add_retention_policy('%s', INTERVAL '%s');", entity, policy)); err != nil {
   478  		return fmt.Errorf("adding retention policy to %s: %w", entity, err)
   479  	}
   480  
   481  	return nil
   482  }
   483  
   484  func setChunkInterval(db *sql.DB, entity string, interval string, log *logging.Logger) error {
   485  	if interval == "" {
   486  		return nil
   487  	}
   488  
   489  	log.Info("Setting chunk interval", zap.String("entity", entity), zap.String("interval", interval))
   490  	if _, err := db.Exec(fmt.Sprintf("SELECT set_chunk_time_interval('%s', INTERVAL '%s');", entity, interval)); err != nil {
   491  		return fmt.Errorf("setting chunk interval for %s: %w", entity, err)
   492  	}
   493  
   494  	return nil
   495  }
   496  
// ApplyDataRetentionPolicies configures TimescaleDB retention policies and
// chunk intervals for every hypertable and continuous aggregate in the data
// node's schema, driven by the configured retention profile
// (standard / archive / lite) plus any per-entity user overrides.
//
// For the archive and lite profiles a single wildcard default is applied to
// every entity. For the standard profile every entity must have a default in
// defaultRetentionPolicies (a missing entry panics), each effective policy
// must be at least one day (unless disabled for system tests), and the
// "blocks" table is handled last: its retention is set to the longest period
// seen across all other entities.
func ApplyDataRetentionPolicies(config Config, log *logging.Logger) error {
	poolConfig, err := config.ConnectionConfig.GetPoolConfig()
	if err != nil {
		return errors.Wrap(err, "applying data retention policy")
	}

	db := stdlib.OpenDB(*poolConfig.ConnConfig)
	defer db.Close()

	// get the hypertables and caggs that have been created for data node
	retentionEntities, err := getRetentionEntities(db)
	if err != nil {
		// We should panic here because something must be wrong
		panic(fmt.Errorf("getting entities with retention policies: %w", err))
	}

	// This is the default retention period the data-node is operating with
	retentionPeriod := config.RetentionPeriod
	// These are any retention policy overrides that have been set by the user
	overridePolicies := config.RetentionPolicies
	// These are any chunk interval overrides that have been set by the user
	overrideChunkIntervals := config.ChunkIntervals

	defaultPolicies := defaultRetentionPolicies[retentionPeriod]

	// Track the longest retention seen so "blocks" can be set to match it below.
	var maxRetentionPeriodInSecs int64
	var blocksRetentionPolicy string

	for _, entity := range retentionEntities {
		// Archive/lite profiles: one wildcard default for every entity, with
		// optional per-entity overrides; no minimum-period check applies.
		if retentionPeriod == RetentionPeriodLite || retentionPeriod == RetentionPeriodArchive {
			policy := defaultPolicies[0]
			overrideRetention, ok := getPolicy(entity, overridePolicies)
			if ok && overrideRetention.DataRetentionPeriod != "" { // we have found an override policy so apply it instead of the default
				policy = overrideRetention
			}

			// Set the default retention period
			if err := setRetentionPolicy(db, entity, policy.DataRetentionPeriod, log); err != nil {
				return fmt.Errorf("setting retention policy for %s to %s: %w", entity, policy.DataRetentionPeriod, err)
			}

			overrideChunkInterval, ok := getPolicy(entity, overrideChunkIntervals)
			if ok && overrideChunkInterval.ChunkInterval != "" {
				if err := setChunkInterval(db, entity, overrideChunkInterval.ChunkInterval, log); err != nil {
					return fmt.Errorf("setting chunk interval for %s to %s: %w", entity, overrideChunkInterval.ChunkInterval, err)
				}
			}

			continue
		}

		if entity == blocksEntity {
			// we should ignore this for now because blocks retention policy needs to be as long as the longest retention period
			continue
		}

		// if the retention period is the standard period, we need to check that a default has been defined, otherwise we should panic
		policy, ok := getPolicy(entity, defaultPolicies)
		if !ok {
			// The development team have omitted a default retention policy for this entity, we should panic here.
			panic(fmt.Errorf("no default retention policy defined for %s", entity))
		}

		override, ok := getPolicy(entity, overridePolicies)
		if ok && override.DataRetentionPeriod != "" { // we have found an override policy so apply it instead of the default
			policy = override
		}

		aboveMinimum, retentionPeriodInSecs, err := checkPolicyPeriodIsAtOrAboveMinimum(oneDayAsSeconds, policy, db)
		if err != nil {
			return fmt.Errorf("checking retention policy period is above minimum:%w", err)
		}

		// Remember the longest retention so far; blocks must live at least this long.
		if retentionPeriodInSecs > maxRetentionPeriodInSecs {
			maxRetentionPeriodInSecs = retentionPeriodInSecs
			blocksRetentionPolicy = policy.DataRetentionPeriod
		}

		if !config.DisableMinRetentionPolicyCheckForUseInSysTestsOnly {
			// We have this check to avoid the datanode removing data that is required for creating data snapshots
			if !aboveMinimum {
				return fmt.Errorf("policy for %s has a retention time less than one day, one day is the minimum permitted", policy.HypertableOrCaggName)
			}
		}

		// Set the default retention period
		if err := setRetentionPolicy(db, entity, policy.DataRetentionPeriod, log); err != nil {
			return fmt.Errorf("setting retention policy for %s to %s: %w", entity, policy.DataRetentionPeriod, err)
		}

		overrideChunkInterval, ok := getPolicy(entity, overrideChunkIntervals)
		if ok && overrideChunkInterval.ChunkInterval != "" {
			if err := setChunkInterval(db, entity, overrideChunkInterval.ChunkInterval, log); err != nil {
				return fmt.Errorf("setting chunk interval for %s to %s: %w", entity, overrideChunkInterval.ChunkInterval, err)
			}
		}
	}

	// finally if the retention period is the standard period, we need to set the blocks retention policy to the longest retention period
	if retentionPeriod == RetentionPeriodStandard {
		if err := setRetentionPolicy(db, blocksEntity, blocksRetentionPolicy, log); err != nil {
			return fmt.Errorf("setting retention policy for %s to %s: %w", blocksEntity, blocksRetentionPolicy, err)
		}
	}

	return nil
}
   604  
   605  func retentionPeriodToSeconds(db *sql.DB, retentionPeriod string) (int64, error) {
   606  	query := fmt.Sprintf("SELECT EXTRACT(epoch FROM INTERVAL '%s')", retentionPeriod)
   607  	row := db.QueryRow(query)
   608  
   609  	var seconds decimal.Decimal
   610  	err := row.Scan(&seconds)
   611  	if err != nil {
   612  		return 0, fmt.Errorf("failed to get interval in seconds for retention period %s: %w", retentionPeriod, err)
   613  	}
   614  
   615  	return seconds.IntPart(), nil
   616  }
   617  
   618  func checkPolicyPeriodIsAtOrAboveMinimum(minimumInSeconds int64, policy RetentionPolicy, db *sql.DB) (bool, int64, error) {
   619  	if policy.DataRetentionPeriod == InfiniteInterval {
   620  		return true, 0, nil
   621  	}
   622  
   623  	secs, err := retentionPeriodToSeconds(db, policy.DataRetentionPeriod)
   624  	if err != nil {
   625  		return false, 0, fmt.Errorf("failed to get interval in seconds for policy %s: %w", policy.HypertableOrCaggName, err)
   626  	}
   627  
   628  	return secs >= minimumInSeconds, secs, nil
   629  }
   630  
// EmbeddedPostgresLog is the destination for the embedded postgres server's
// log output; any io.Writer satisfies it.
type EmbeddedPostgresLog interface {
	io.Writer
}
   634  
   635  func StartEmbeddedPostgres(log *logging.Logger, config Config, runtimeDir string, postgresLog EmbeddedPostgresLog) (*embeddedpostgres.EmbeddedPostgres, error) {
   636  	log = log.Named("embedded-postgres")
   637  	log.SetLevel(config.Level.Get())
   638  	embeddedPostgresDataPath := paths.JoinStatePath(paths.StatePath(runtimeDir), "node-data")
   639  
   640  	embeddedPostgres := createEmbeddedPostgres(runtimeDir, &embeddedPostgresDataPath,
   641  		postgresLog, config.ConnectionConfig)
   642  
   643  	if err := embeddedPostgres.Start(); err != nil {
   644  		log.Errorf("error starting embedded postgres: %v", err)
   645  		return nil, fmt.Errorf("use embedded database was true, but failed to start: %w", err)
   646  	}
   647  
   648  	return embeddedPostgres, nil
   649  }
   650  
   651  func createEmbeddedPostgres(runtimePath string, dataPath *paths.StatePath, writer io.Writer, conf ConnectionConfig) *embeddedpostgres.EmbeddedPostgres {
   652  	dbConfig := embeddedpostgres.DefaultConfig().
   653  		Username(conf.Username).
   654  		Password(conf.Password).
   655  		Database(conf.Database).
   656  		Port(uint32(conf.Port)).
   657  		ListenAddr(conf.Host).
   658  		SocketDir(conf.SocketDir).
   659  		Logger(writer)
   660  
   661  	if len(runtimePath) != 0 {
   662  		dbConfig = dbConfig.RuntimePath(runtimePath).BinariesPath(runtimePath)
   663  	}
   664  
   665  	if dataPath != nil {
   666  		dbConfig = dbConfig.DataPath(dataPath.String())
   667  	}
   668  
   669  	return embeddedpostgres.NewDatabase(dbConfig)
   670  }