code.vegaprotocol.io/vega@v0.79.0/datanode/networkhistory/service_test.go (about)

     1  // Copyright (C) 2023 Gobalsky Labs Limited
     2  //
     3  // This program is free software: you can redistribute it and/or modify
     4  // it under the terms of the GNU Affero General Public License as
     5  // published by the Free Software Foundation, either version 3 of the
     6  // License, or (at your option) any later version.
     7  //
     8  // This program is distributed in the hope that it will be useful,
     9  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    10  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    11  // GNU Affero General Public License for more details.
    12  //
    13  // You should have received a copy of the GNU Affero General Public License
    14  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    15  
    16  package networkhistory_test
    17  
    18  import (
    19  	"bytes"
    20  	"compress/gzip"
    21  	"context"
    22  	"crypto/md5"
    23  	"encoding/hex"
    24  	"errors"
    25  	"fmt"
    26  	"io"
    27  	"io/fs"
    28  	"os"
    29  	"path/filepath"
    30  	"strconv"
    31  	"strings"
    32  	"sync"
    33  	"testing"
    34  	"time"
    35  
    36  	"code.vegaprotocol.io/vega/cmd/data-node/commands/start"
    37  	"code.vegaprotocol.io/vega/core/events"
    38  	"code.vegaprotocol.io/vega/datanode/broker"
    39  	"code.vegaprotocol.io/vega/datanode/candlesv2"
    40  	config2 "code.vegaprotocol.io/vega/datanode/config"
    41  	"code.vegaprotocol.io/vega/datanode/entities"
    42  	"code.vegaprotocol.io/vega/datanode/networkhistory"
    43  	"code.vegaprotocol.io/vega/datanode/networkhistory/segment"
    44  	"code.vegaprotocol.io/vega/datanode/networkhistory/snapshot"
    45  	"code.vegaprotocol.io/vega/datanode/networkhistory/store"
    46  	"code.vegaprotocol.io/vega/datanode/service"
    47  	"code.vegaprotocol.io/vega/datanode/sqlstore"
    48  	"code.vegaprotocol.io/vega/datanode/utils/databasetest"
    49  	"code.vegaprotocol.io/vega/logging"
    50  	eventsv1 "code.vegaprotocol.io/vega/protos/vega/events/v1"
    51  
    52  	"github.com/jackc/pgx/v4/pgxpool"
    53  	"github.com/jackc/pgx/v4/stdlib"
    54  	"github.com/pressly/goose/v3"
    55  	uuid "github.com/satori/go.uuid"
    56  	"github.com/stretchr/testify/assert"
    57  	"github.com/stretchr/testify/require"
    58  )
    59  
// Fixture constants for the golden event replay. The recorded stream runs to
// block 5000; snapshots are taken every snapshotInterval blocks, plus one at
// the protocol-upgrade height (2500), giving numSnapshots in total.
const (
	// snapshotInterval is the block-height spacing between automatic snapshots.
	snapshotInterval = int64(1000)
	// chainID identifies the chain for all snapshots and history segments.
	chainID = "testnet-001"
	// compressedEventsFile is the gzipped recorded event stream replayed by TestMain.
	compressedEventsFile = "testdata/smoketest_to_block_5000.evts.gz"
	// numSnapshots is the expected number of snapshots over the full replay.
	numSnapshots = 6
	// testMigrationSQL is the goose migration applied to simulate a schema upgrade.
	testMigrationSQL = "testdata/testmigration.sql"
)
    67  
// Package-level state populated by TestMain and read by the individual tests.
var (
	sqlConfig              sqlstore.Config
	networkHistoryConnPool *pgxpool.Pool

	// fromEventHashes holds the md5 hash of each snapshot's unpublished data
	// directory, in snapshot order, as produced by pure event replay.
	fromEventHashes []string
	// fromEventsDatabaseSummaries holds a database summary captured right
	// after each snapshot during event replay; tests compare restored
	// databases against these.
	fromEventsDatabaseSummaries []databaseSummary

	// fromEventsIntervalToHistoryTableDelta records, per snapshot interval,
	// a per-table summary of the history-table delta for that interval.
	fromEventsIntervalToHistoryTableDelta []map[string]tableDataSummary

	snapshotsBackupDir string
	eventsDir          string
	eventsFile         string

	// goldenSourceHistorySegment maps a segment's to-height to the published
	// segment stored in the network-history store by TestMain.
	goldenSourceHistorySegment map[int64]segment.Full

	// Heights each published history segment is expected to span. Note the
	// extra 2001-2500 segment produced by the simulated protocol upgrade.
	expectedHistorySegmentsFromHeights = []int64{1, 1001, 2001, 2501, 3001, 4001}
	expectedHistorySegmentsToHeights   = []int64{1000, 2000, 2500, 3000, 4000, 5000}

	networkHistoryStore *store.Store

	// postgresLog captures the embedded postgres output; dumped on test failure.
	postgresLog *bytes.Buffer

	testMigrationsDir       string
	highestMigrationNumber  int64
	testMigrationVersionNum int64
	sqlFs                   fs.FS
)
    95  
    96  func TestMain(t *testing.M) {
    97  	outerCtx, cancelOuterCtx := context.WithCancel(context.Background())
    98  	defer cancelOuterCtx()
    99  
   100  	// because we have a ton of panics in here:
   101  	defer func() {
   102  		if r := recover(); r != nil {
   103  			cancelOuterCtx()
   104  			panic(r) // propagate panic
   105  		}
   106  	}()
   107  	testMigrationVersionNum, sqlFs = setupTestSQLMigrations()
   108  	highestMigrationNumber = testMigrationVersionNum - 1
   109  
   110  	var err error
   111  	snapshotsBackupDir, err = os.MkdirTemp("", "snapshotbackup")
   112  	if err != nil {
   113  		panic(err)
   114  	}
   115  	defer os.RemoveAll(snapshotsBackupDir)
   116  
   117  	eventsDir, err = os.MkdirTemp("", "eventsdir")
   118  	if err != nil {
   119  		panic(err)
   120  	}
   121  	defer os.RemoveAll(eventsDir)
   122  
   123  	log := logging.NewTestLogger()
   124  
   125  	eventsFile = filepath.Join(eventsDir, "smoketest_to_block_5000_or_above.evts")
   126  	decompressEventFile()
   127  
   128  	tempDir, err := os.MkdirTemp("", "networkhistory")
   129  	if err != nil {
   130  		panic(err)
   131  	}
   132  	postgresRuntimePath := filepath.Join(tempDir, "sqlstore")
   133  	defer os.RemoveAll(tempDir)
   134  
   135  	networkHistoryHome, err := os.MkdirTemp("", "networkhistoryhome")
   136  	if err != nil {
   137  		panic(err)
   138  	}
   139  	defer os.RemoveAll(networkHistoryHome)
   140  
   141  	defer func() {
   142  		if networkHistoryConnPool != nil {
   143  			networkHistoryConnPool.Close()
   144  		}
   145  	}()
   146  
   147  	exitCode := databasetest.TestMain(t, outerCtx, func(config sqlstore.Config, source *sqlstore.ConnectionSource,
   148  		pgLog *bytes.Buffer,
   149  	) {
   150  		sqlConfig = config
   151  		log.Infof("DB Connection String: ", sqlConfig.ConnectionConfig.GetConnectionString())
   152  
   153  		pool, err := sqlstore.CreateConnectionPool(outerCtx, sqlConfig.ConnectionConfig)
   154  		if err != nil {
   155  			panic(fmt.Errorf("failed to create connection pool: %w", err))
   156  		}
   157  		networkHistoryConnPool = pool
   158  
   159  		postgresLog = pgLog
   160  
   161  		emptyDatabaseAndSetSchemaVersion(highestMigrationNumber)
   162  
   163  		// Do initial run to get the expected state of the datanode from just event playback
   164  		ctx, cancel := context.WithCancel(outerCtx)
   165  		defer cancel()
   166  
   167  		snapshotCopyToPath := filepath.Join(networkHistoryHome, "snapshotsCopyTo")
   168  
   169  		snapshotService := setupSnapshotService(snapshotCopyToPath)
   170  
   171  		var snapshots []segment.Unpublished
   172  
   173  		ctxWithCancel, cancelFn := context.WithCancel(ctx)
   174  		defer cancelFn()
   175  
   176  		evtSource := newTestEventSourceWithProtocolUpdateMessage()
   177  
   178  		pus := service.NewProtocolUpgrade(nil, log)
   179  		puh := networkhistory.NewProtocolUpgradeHandler(log, pus, evtSource, func(ctx context.Context, chainID string,
   180  			toHeight int64,
   181  		) error {
   182  			ss, err := snapshotService.CreateSnapshot(ctx, chainID, toHeight)
   183  			if err != nil {
   184  				panic(fmt.Errorf("failed to create snapshot: %w", err))
   185  			}
   186  
   187  			waitForSnapshotToComplete(ss)
   188  
   189  			snapshots = append(snapshots, ss)
   190  
   191  			md5Hash, err := Md5Hash(ss.UnpublishedSnapshotDataDirectory())
   192  			if err != nil {
   193  				panic(fmt.Errorf("failed to get snapshot hash:%w", err))
   194  			}
   195  
   196  			fromEventHashes = append(fromEventHashes, md5Hash)
   197  
   198  			updateAllContinuousAggregateData(ctx)
   199  			summary := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
   200  
   201  			fromEventsDatabaseSummaries = append(fromEventsDatabaseSummaries, summary)
   202  
   203  			return nil
   204  		})
   205  
   206  		preUpgradeBroker, err := setupSQLBroker(outerCtx, sqlConfig, snapshotService,
   207  			func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
   208  				if lastCommittedBlockHeight > 0 && lastCommittedBlockHeight%snapshotInterval == 0 {
   209  					lastSnapshot, err := service.CreateSnapshotAsynchronously(ctx, chainId, lastCommittedBlockHeight)
   210  					if err != nil {
   211  						panic(fmt.Errorf("failed to create snapshot:%w", err))
   212  					}
   213  
   214  					waitForSnapshotToComplete(lastSnapshot)
   215  					snapshots = append(snapshots, lastSnapshot)
   216  					md5Hash, err := Md5Hash(lastSnapshot.UnpublishedSnapshotDataDirectory())
   217  					if err != nil {
   218  						panic(fmt.Errorf("failed to get snapshot hash:%w", err))
   219  					}
   220  
   221  					fromEventHashes = append(fromEventHashes, md5Hash)
   222  
   223  					updateAllContinuousAggregateData(ctx)
   224  					summary := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
   225  
   226  					fromEventsDatabaseSummaries = append(fromEventsDatabaseSummaries, summary)
   227  
   228  					if lastCommittedBlockHeight == numSnapshots*snapshotInterval {
   229  						cancelFn()
   230  					}
   231  				}
   232  			},
   233  			evtSource, puh)
   234  		if err != nil {
   235  			panic(fmt.Errorf("failed to setup pre-protocol upgrade sqlbroker:%w", err))
   236  		}
   237  
   238  		err = preUpgradeBroker.Receive(ctxWithCancel)
   239  		if err != nil && !errors.Is(err, context.Canceled) {
   240  			panic(fmt.Errorf("failed to process events:%w", err))
   241  		}
   242  
   243  		protocolUpgradeStarted := pus.GetProtocolUpgradeStarted()
   244  		if !protocolUpgradeStarted {
   245  			panic("expected protocol upgrade to have started")
   246  		}
   247  
   248  		// Here after exit of the broker because of protocol upgrade, we simulate a restart of the node by recreating
   249  		// the broker.
   250  		// First simulate a schema update
   251  		err = migrateUpToDatabaseVersion(testMigrationVersionNum)
   252  		if err != nil {
   253  			panic(err)
   254  		}
   255  
   256  		pus = service.NewProtocolUpgrade(nil, log)
   257  		nonInterceptPuh := networkhistory.NewProtocolUpgradeHandler(log, pus, evtSource, func(ctx context.Context,
   258  			chainID string, toHeight int64,
   259  		) error {
   260  			return nil
   261  		})
   262  
   263  		postUpgradeBroker, err := setupSQLBroker(outerCtx, sqlConfig, snapshotService,
   264  			func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
   265  				if lastCommittedBlockHeight > 0 && lastCommittedBlockHeight%snapshotInterval == 0 {
   266  					lastSnapshot, err := service.CreateSnapshotAsynchronously(ctx, chainId, lastCommittedBlockHeight)
   267  					if err != nil {
   268  						panic(fmt.Errorf("failed to create snapshot:%w", err))
   269  					}
   270  
   271  					waitForSnapshotToComplete(lastSnapshot)
   272  					snapshots = append(snapshots, lastSnapshot)
   273  					md5Hash, err := Md5Hash(lastSnapshot.UnpublishedSnapshotDataDirectory())
   274  					if err != nil {
   275  						panic(fmt.Errorf("failed to get snapshot hash:%w", err))
   276  					}
   277  
   278  					fromEventHashes = append(fromEventHashes, md5Hash)
   279  
   280  					updateAllContinuousAggregateData(ctx)
   281  					summary := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
   282  
   283  					fromEventsDatabaseSummaries = append(fromEventsDatabaseSummaries, summary)
   284  
   285  					if lastCommittedBlockHeight == (numSnapshots-1)*snapshotInterval {
   286  						cancelFn()
   287  					}
   288  				}
   289  			},
   290  			evtSource, nonInterceptPuh)
   291  		if err != nil {
   292  			panic(fmt.Errorf("failed to setup post protocol upgrade sqlbroker:%w", err))
   293  		}
   294  
   295  		err = postUpgradeBroker.Receive(ctxWithCancel)
   296  		if err != nil && !errors.Is(err, context.Canceled) {
   297  			panic(fmt.Errorf("failed to process events:%w", err))
   298  		}
   299  
   300  		if len(fromEventHashes) != numSnapshots {
   301  			panic(fmt.Errorf("expected 5 snapshots, got %d", len(fromEventHashes)))
   302  		}
   303  
   304  		if len(fromEventsDatabaseSummaries) != numSnapshots {
   305  			panic(fmt.Errorf("expected %d database summaries, got %d", numSnapshots, len(fromEventHashes)))
   306  		}
   307  
   308  		fromEventsIntervalToHistoryTableDelta = getSnapshotIntervalToHistoryTableDeltaSummary(ctx, sqlConfig.ConnectionConfig,
   309  			expectedHistorySegmentsFromHeights, expectedHistorySegmentsToHeights)
   310  
   311  		if len(fromEventsIntervalToHistoryTableDelta) != numSnapshots {
   312  			panic(fmt.Errorf("expected %d history table deltas, got %d", numSnapshots, len(fromEventHashes)))
   313  		}
   314  
   315  		// Network history store setup
   316  		storeCfg := store.NewDefaultConfig()
   317  		storeCfg.SwarmKeyOverride = uuid.NewV4().String()
   318  
   319  		storeCfg.SwarmPort = databasetest.GetNextFreePort()
   320  
   321  		storeLog := logging.NewTestLogger()
   322  		storeLog.SetLevel(logging.InfoLevel)
   323  		networkHistoryStore, err = store.New(outerCtx, storeLog, chainID, storeCfg, networkHistoryHome, 33)
   324  		if err != nil {
   325  			panic(err)
   326  		}
   327  
   328  		datanodeConfig := config2.NewDefaultConfig()
   329  		cfg := networkhistory.NewDefaultConfig()
   330  
   331  		_, err = networkhistory.New(outerCtx, log, chainID, cfg, networkHistoryConnPool, snapshotService,
   332  			networkHistoryStore, datanodeConfig.API.Port, snapshotCopyToPath)
   333  
   334  		if err != nil {
   335  			panic(err)
   336  		}
   337  
   338  		startTime := time.Now()
   339  		timeout := 1 * time.Minute
   340  
   341  		for {
   342  			if time.Now().After(startTime.Add(timeout)) {
   343  				panic(fmt.Sprintf("history not found in network store after %s", timeout))
   344  			}
   345  
   346  			time.Sleep(10 * time.Millisecond)
   347  
   348  			storedSegments, err := networkHistoryStore.ListAllIndexEntriesOldestFirst()
   349  			if err != nil {
   350  				panic(err)
   351  			}
   352  
   353  			goldenSourceHistorySegment = map[int64]segment.Full{}
   354  			for _, storedSegment := range storedSegments {
   355  				goldenSourceHistorySegment[storedSegment.HeightTo] = storedSegment
   356  			}
   357  
   358  			allExpectedSegmentsFound := true
   359  			for _, expected := range expectedHistorySegmentsToHeights {
   360  				if _, ok := goldenSourceHistorySegment[expected]; !ok {
   361  					allExpectedSegmentsFound = false
   362  					break
   363  				}
   364  			}
   365  
   366  			if allExpectedSegmentsFound {
   367  				break
   368  			}
   369  		}
   370  
   371  		// For the same events file and block height the history segment ID should be the same across all OS/Arch
   372  		// If the events file is updated, schema changes, or snapshot height changed this will need updating
   373  		// Easiest way to update is to put a breakpoint here or inspect the log for the lines printed below
   374  		log.Info("expected history segment IDs:")
   375  		log.Infof("%s", goldenSourceHistorySegment[1000].HistorySegmentID)
   376  		log.Infof("%s", goldenSourceHistorySegment[2000].HistorySegmentID)
   377  		log.Infof("%s", goldenSourceHistorySegment[2500].HistorySegmentID)
   378  		log.Infof("%s", goldenSourceHistorySegment[3000].HistorySegmentID)
   379  		log.Infof("%s", goldenSourceHistorySegment[4000].HistorySegmentID)
   380  		log.Infof("%s", goldenSourceHistorySegment[5000].HistorySegmentID)
   381  
   382  		panicIfHistorySegmentIdsNotEqual(goldenSourceHistorySegment[1000].HistorySegmentID, "QmYb9JTy7gviDLeQG69LGg3Y1DnwcewhoUUUmxfpGzLVKo", snapshots)
   383  		panicIfHistorySegmentIdsNotEqual(goldenSourceHistorySegment[2000].HistorySegmentID, "QmSg6gxZsuFYh2UdyNzx65N6LursqvUhrkBMLoiLuHNQon", snapshots)
   384  		panicIfHistorySegmentIdsNotEqual(goldenSourceHistorySegment[2500].HistorySegmentID, "QmSaBaig6JhAVjTpjfF5XbHaDynZPGmTyaKcjnH5FNFdw6", snapshots)
   385  		panicIfHistorySegmentIdsNotEqual(goldenSourceHistorySegment[3000].HistorySegmentID, "QmcYVJ4nMiEotA4TjjEGShzP3YfJA8VTjW6G7GjC1Em7P5", snapshots)
   386  		panicIfHistorySegmentIdsNotEqual(goldenSourceHistorySegment[4000].HistorySegmentID, "QmVjNA91oqTCHyWRuuLBAjadSrWeJsWdJ6jUYWECx9sTYi", snapshots)
   387  		panicIfHistorySegmentIdsNotEqual(goldenSourceHistorySegment[5000].HistorySegmentID, "QmXoDjTNwEB1AvEumWDWQEk5r6rHhxgTfiUWcHtZFJUbrk", snapshots)
   388  	}, postgresRuntimePath, sqlFs)
   389  
   390  	if exitCode != 0 {
   391  		log.Errorf("One or more tests failed, dumping postgres log:\n%s", postgresLog.String())
   392  	}
   393  }
   394  
   395  func updateAllContinuousAggregateData(ctx context.Context) {
   396  	blockspan, err := sqlstore.GetDatanodeBlockSpan(ctx, networkHistoryConnPool)
   397  	if err != nil {
   398  		panic(err)
   399  	}
   400  
   401  	err = snapshot.UpdateContinuousAggregateDataFromHighWaterMark(ctx, networkHistoryConnPool, blockspan.ToHeight)
   402  	if err != nil {
   403  		panic(err)
   404  	}
   405  }
   406  
// TestLoadingDataFetchedAsynchronously verifies that segments fetched from the
// network-history store can be loaded into the datanode: first the most recent
// contiguous chunk (3001-4000), then — after replaying events to 5000 and
// publishing — the full 1-5000 range, which must reproduce the same database
// state as pure event replay.
func TestLoadingDataFetchedAsynchronously(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	log := logging.NewTestLogger()

	require.NoError(t, networkHistoryStore.ResetIndex())
	emptyDatabaseAndSetSchemaVersion(highestMigrationNumber)

	snapshotCopyToPath := t.TempDir()
	snapshotService := setupSnapshotService(snapshotCopyToPath)

	// Fetch 1000 blocks ending at the golden segment for height 4000.
	fetched, err := fetchBlocks(ctx, log, networkHistoryStore, goldenSourceHistorySegment[4000].HistorySegmentID, 1000)
	require.NoError(t, err)
	require.Equal(t, int64(1000), fetched)

	networkhistoryService := setupNetworkHistoryService(ctx, log, snapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err := networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err := segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	loaded, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
	require.NoError(t, err)
	assert.Equal(t, int64(3001), loaded.LoadedFromHeight)
	assert.Equal(t, int64(4000), loaded.LoadedToHeight)

	// Index 4 is the summary taken at height 4000 during TestMain's replay.
	dbSummary := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[4].currentTableSummaries, dbSummary.currentTableSummaries)

	// Run events to height 5000
	ctxWithCancel, cancelFn := context.WithCancel(ctx)
	evtSource := newTestEventSourceWithProtocolUpdateMessage()

	pus := service.NewProtocolUpgrade(nil, log)
	puh := networkhistory.NewProtocolUpgradeHandler(log, pus, evtSource, func(ctx context.Context, chainID string,
		toHeight int64,
	) error {
		return nil
	})

	var md5Hash string
	broker, err := setupSQLBroker(ctx, sqlConfig, snapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
			if lastCommittedBlockHeight > 0 && lastCommittedBlockHeight%snapshotInterval == 0 {
				ss, err := service.CreateSnapshotAsynchronously(ctx, chainId, lastCommittedBlockHeight)
				require.NoError(t, err)

				waitForSnapshotToComplete(ss)

				md5Hash, err = Md5Hash(ss.UnpublishedSnapshotDataDirectory())
				require.NoError(t, err)

				fromEventHashes = append(fromEventHashes, md5Hash)
			}

			if lastCommittedBlockHeight == 5000 {
				cancelFn()
			}
		},
		evtSource, puh)
	require.NoError(t, err)

	// Context cancellation is the expected exit path once height 5000 is hit.
	err = broker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// The snapshot at 5000 must match the golden hash from TestMain's replay.
	require.Equal(t, fromEventHashes[5], md5Hash)

	networkhistoryService.PublishSegments(ctx)

	// Now simulate the situation where the previous history segments were fetched asynchronously during event processing
	// and full history is then subsequently loaded
	emptyDatabaseAndSetSchemaVersion(0)

	fetched, err = fetchBlocks(ctx, log, networkHistoryStore, goldenSourceHistorySegment[3000].HistorySegmentID, 3000)
	require.NoError(t, err)
	require.Equal(t, int64(3000), fetched)

	segments, err = networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	segmentsInRange, err := segments.ContiguousHistoryInRange(1, 5000)
	require.NoError(t, err)
	loaded, err = networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, segmentsInRange, sqlConfig.ConnectionConfig, false, false)
	require.NoError(t, err)
	assert.Equal(t, int64(1), loaded.LoadedFromHeight)
	assert.Equal(t, int64(5000), loaded.LoadedToHeight)

	// Full restore must match the height-5000 golden summary exactly.
	dbSummary = getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[5].currentTableSummaries, dbSummary.currentTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[5].historyTableSummaries, dbSummary.historyTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[5].caggSummaries, dbSummary.caggSummaries)
}
   503  
// TestRestoringNodeThatAlreadyContainsData replays events to height 1800,
// loads network history covering 1001-4000 on top (the load should resume
// from 1801), then continues event replay to 5000 and checks the resulting
// database matches the golden state from pure event replay.
func TestRestoringNodeThatAlreadyContainsData(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	log := logging.NewTestLogger()

	require.NoError(t, networkHistoryStore.ResetIndex())
	emptyDatabaseAndSetSchemaVersion(highestMigrationNumber)

	snapshotCopyToPath := t.TempDir()
	snapshotService := setupSnapshotService(snapshotCopyToPath)

	ctxWithCancel, cancelFn := context.WithCancel(ctx)

	evtSource := newTestEventSourceWithProtocolUpdateMessage()

	pus := service.NewProtocolUpgrade(nil, log)
	puh := networkhistory.NewProtocolUpgradeHandler(log, pus, evtSource, func(ctx context.Context, chainID string,
		toHeight int64,
	) error {
		return nil
	})

	// Run events to height 1800

	broker, err := setupSQLBroker(ctx, sqlConfig, snapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
			if lastCommittedBlockHeight == 1800 {
				cancelFn()
			}
		},
		evtSource, puh)
	require.NoError(t, err)

	// Cancellation is the expected exit once height 1800 is reached.
	err = broker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// Fetch 3000 blocks of history ending at the segment for height 4000.
	fetched, err := fetchBlocks(ctx, log, networkHistoryStore, goldenSourceHistorySegment[4000].HistorySegmentID, 3000)
	require.NoError(t, err)
	require.Equal(t, int64(3000), fetched)

	snapshotCopyToPath = t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err := networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err := segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	// Loading on top of existing data should only load from just past the
	// node's current height (1800), not from the start of the chunk.
	loaded, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
	require.NoError(t, err)
	assert.Equal(t, int64(1801), loaded.LoadedFromHeight)
	assert.Equal(t, int64(4000), loaded.LoadedToHeight)

	// Index 4 is the golden summary captured at height 4000.
	dbSummary := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[4].currentTableSummaries, dbSummary.currentTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[4].historyTableSummaries, dbSummary.historyTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[4].caggSummaries, dbSummary.caggSummaries)

	// Run events to height 5000
	ctxWithCancel, cancelFn = context.WithCancel(ctx)
	evtSource = newTestEventSourceWithProtocolUpdateMessage()

	pus = service.NewProtocolUpgrade(nil, log)
	puh = networkhistory.NewProtocolUpgradeHandler(log, pus, evtSource, func(ctx context.Context, chainID string,
		toHeight int64,
	) error {
		return nil
	})

	var md5Hash string
	broker, err = setupSQLBroker(ctx, sqlConfig, snapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
			if lastCommittedBlockHeight > 0 && lastCommittedBlockHeight%snapshotInterval == 0 {
				ss, err := service.CreateSnapshotAsynchronously(ctx, chainId, lastCommittedBlockHeight)
				require.NoError(t, err)

				waitForSnapshotToComplete(ss)

				md5Hash, err = Md5Hash(ss.UnpublishedSnapshotDataDirectory())
				require.NoError(t, err)

				fromEventHashes = append(fromEventHashes, md5Hash)
			}

			if lastCommittedBlockHeight == 5000 {
				cancelFn()
			}
		},
		evtSource, puh)
	require.NoError(t, err)

	err = broker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// The snapshot at 5000 must equal the golden hash from pure replay.
	require.Equal(t, fromEventHashes[5], md5Hash)

	dbSummary = getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[5].currentTableSummaries, dbSummary.currentTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[5].historyTableSummaries, dbSummary.historyTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[5].caggSummaries, dbSummary.caggSummaries)
}
   613  
// TestRestoringNodeWithDataOlderAndNewerThanItContainsLoadsTheNewerData first
// restores a node from the single 3001-4000 segment, then loads the full
// 1-5000 history: only the newer 4001-5000 portion should actually be loaded,
// leaving the node spanning 3001-5000.
func TestRestoringNodeWithDataOlderAndNewerThanItContainsLoadsTheNewerData(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, networkHistoryStore.ResetIndex())

	log := logging.NewTestLogger()

	snapshotCopyToPath := t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	emptyDatabaseAndSetSchemaVersion(0)

	historySegment := goldenSourceHistorySegment[4000]

	// Request only 1 block; fetchBlocks still retrieves the whole 1000-block segment.
	blocksFetched, err := fetchBlocks(ctx, log, networkHistoryStore, historySegment.HistorySegmentID, 1)
	require.NoError(t, err)

	assert.Equal(t, int64(1000), blocksFetched)
	networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err := networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err := segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	loaded, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)

	require.NoError(t, err)

	assert.Equal(t, int64(3001), loaded.LoadedFromHeight)
	assert.Equal(t, int64(4000), loaded.LoadedToHeight)

	// Now try to load in history from 0 to 5000
	require.NoError(t, networkHistoryStore.ResetIndex())
	snapshotCopyToPath = t.TempDir()
	inputSnapshotService = setupSnapshotService(snapshotCopyToPath)

	historySegment = goldenSourceHistorySegment[5000]

	blocksFetched, err = fetchBlocks(ctx, log, networkHistoryStore, historySegment.HistorySegmentID, 5000)
	require.NoError(t, err)

	assert.Equal(t, int64(5000), blocksFetched)
	networkhistoryService = setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err = networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err = segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	// Only the portion newer than what the node already holds is loaded.
	result, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
	require.Nil(t, err)

	assert.Equal(t, int64(4001), result.LoadedFromHeight)
	assert.Equal(t, int64(5000), result.LoadedToHeight)

	// The node's overall span keeps its older floor (3001) and gains the new ceiling.
	span, err := sqlstore.GetDatanodeBlockSpan(ctx, networkHistoryConnPool)
	require.Nil(t, err)

	assert.Equal(t, int64(3001), span.FromHeight)
	assert.Equal(t, int64(5000), span.ToHeight)
}
   677  
// TestRestoringNodeWithHistoryOnlyFromBeforeTheNodesOldestBlockFails restores
// a node from the 3001-4000 segment, then attempts to load history that ends
// at height 1000 — entirely before the node's oldest block — and expects the
// load to be rejected with an error.
func TestRestoringNodeWithHistoryOnlyFromBeforeTheNodesOldestBlockFails(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, networkHistoryStore.ResetIndex())

	log := logging.NewTestLogger()

	snapshotCopyToPath := t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	emptyDatabaseAndSetSchemaVersion(0)

	historySegment := goldenSourceHistorySegment[4000]

	// Requesting 1 block still pulls the full 1000-block segment.
	blocksFetched, err := fetchBlocks(ctx, log, networkHistoryStore, historySegment.HistorySegmentID, 1)
	require.NoError(t, err)

	assert.Equal(t, int64(1000), blocksFetched)
	networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err := networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err := segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	loaded, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
	require.NoError(t, err)

	assert.Equal(t, int64(3001), loaded.LoadedFromHeight)
	assert.Equal(t, int64(4000), loaded.LoadedToHeight)

	// Now try to load in history from 1000 to 2000
	require.NoError(t, networkHistoryStore.ResetIndex())
	snapshotCopyToPath = t.TempDir()
	inputSnapshotService = setupSnapshotService(snapshotCopyToPath)

	historySegment = goldenSourceHistorySegment[1000]

	blocksFetched, err = fetchBlocks(ctx, log, networkHistoryStore, historySegment.HistorySegmentID, 1)
	require.NoError(t, err)

	assert.Equal(t, int64(1000), blocksFetched)
	networkhistoryService = setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err = networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err = segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	// Loading history that predates the node's oldest block must fail.
	_, err = networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
	require.NotNil(t, err)
}
   731  
// TestRestoringNodeWithExistingDataFailsWhenLoadingWouldResultInNonContiguousHistory
// replays events into the datanode up to height 1800, then fetches history
// covering only heights 2001-4000. Loading that history on top of the existing
// data would leave a gap (1801-2000), so the load must be rejected.
func TestRestoringNodeWithExistingDataFailsWhenLoadingWouldResultInNonContiguousHistory(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	log := logging.NewTestLogger()

	require.NoError(t, networkHistoryStore.ResetIndex())
	emptyDatabaseAndSetSchemaVersion(highestMigrationNumber)

	snapshotCopyToPath := t.TempDir()
	snapshotService := setupSnapshotService(snapshotCopyToPath)

	ctxWithCancel, cancelFn := context.WithCancel(ctx)

	evtSource := newTestEventSourceWithProtocolUpdateMessage()

	pus := service.NewProtocolUpgrade(nil, log)
	puh := networkhistory.NewProtocolUpgradeHandler(log, pus, evtSource, func(ctx context.Context, chainID string,
		toHeight int64,
	) error {
		return nil
	})

	// Run events to height 1800

	broker, err := setupSQLBroker(ctx, sqlConfig, snapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
			if lastCommittedBlockHeight == 1800 {
				cancelFn()
			}
		},
		evtSource, puh)
	require.NoError(t, err)

	// Receive returns context.Canceled when we stop it via cancelFn above;
	// any other error is a genuine failure.
	err = broker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// Now fetch history but not enough to form a contiguous history with the existing data

	fetched, err := fetchBlocks(ctx, log, networkHistoryStore, goldenSourceHistorySegment[4000].HistorySegmentID, 2000)
	require.NoError(t, err)
	require.Equal(t, int64(2000), fetched)

	snapshotCopyToPath = t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err := networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err := segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	// Loading heights 2001-4000 on top of data ending at 1800 would leave a
	// gap, so the load is expected to fail.
	_, err = networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
	require.NotNil(t, err)
}
   791  
// TestRestoringFromDifferentHeightsWithFullHistory restores a fresh datanode
// from each golden-source history segment in turn and checks that the full
// history loads (LoadedFromHeight == 1) and the resulting database state
// matches the summaries captured from a pure event replay at the same height.
func TestRestoringFromDifferentHeightsWithFullHistory(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, networkHistoryStore.ResetIndex())

	log := logging.NewTestLogger()

	snapshotCopyToPath := t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	for i := int64(0); i < numSnapshots; i++ {
		// Start from an empty, unmigrated database on every iteration.
		emptyDatabaseAndSetSchemaVersion(0)
		fromHeight := expectedHistorySegmentsFromHeights[i]
		toHeight := expectedHistorySegmentsToHeights[i]

		historySegment := goldenSourceHistorySegment[toHeight]

		// Fetch the expected number of blocks ending at this segment's height.
		expectedBlocks := toHeight - fromHeight + 1
		blocksFetched, err := fetchBlocks(ctx, log, networkHistoryStore, historySegment.HistorySegmentID, expectedBlocks)
		require.NoError(t, err)

		assert.Equal(t, expectedBlocks, blocksFetched)
		networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
		segments, err := networkhistoryService.ListAllHistorySegments()
		require.NoError(t, err)

		chunk, err := segments.MostRecentContiguousHistory()
		require.NoError(t, err)

		loaded, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
		require.NoError(t, err)

		// With full history available the load should always start at block 1.
		assert.Equal(t, int64(1), loaded.LoadedFromHeight)
		assert.Equal(t, toHeight, loaded.LoadedToHeight)

		// Compare the restored state against the golden event-replay summaries.
		dbSummary := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
		assertSummariesAreEqual(t, fromEventsDatabaseSummaries[i].currentTableSummaries, dbSummary.currentTableSummaries)
		assertSummariesAreEqual(t, fromEventsDatabaseSummaries[i].historyTableSummaries, dbSummary.historyTableSummaries)
		assertSummariesAreEqual(t, fromEventsDatabaseSummaries[i].caggSummaries, dbSummary.caggSummaries)
	}
}
   834  
// TestRestoreFromPartialHistoryAndProcessEvents restores a datanode from a
// partial history (heights 2001-3000), then replays events on top of it up to
// height 5000, snapshotting at each interval. It verifies the snapshot taken
// at height 4000 matches the one produced by a pure event replay, and that
// history-table deltas agree for the restored/replayed intervals while the
// never-loaded intervals remain empty.
func TestRestoreFromPartialHistoryAndProcessEvents(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, networkHistoryStore.ResetIndex())

	var err error
	log := logging.NewTestLogger()

	emptyDatabaseAndSetSchemaVersion(0)

	// Fetch only the most recent 1000 blocks of history (ending at 3000).
	fetched, err := fetchBlocks(ctx, log, networkHistoryStore, goldenSourceHistorySegment[3000].HistorySegmentID, 1000)
	require.NoError(t, err)
	require.Equal(t, int64(1000), fetched)

	snapshotCopyToPath := t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err := networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err := segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	loaded, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
	require.NoError(t, err)
	assert.Equal(t, int64(2001), loaded.LoadedFromHeight)
	assert.Equal(t, int64(3000), loaded.LoadedToHeight)

	connSource, err := sqlstore.NewTransactionalConnectionSource(ctx, logging.NewTestLogger(), sqlConfig.ConnectionConfig)
	require.NoError(t, err)
	defer connSource.Close()

	evtSource, err := newTestEventSource(func(events.Event, chan<- events.Event) {})
	require.NoError(t, err)

	pus := service.NewProtocolUpgrade(nil, log)
	puh := networkhistory.NewProtocolUpgradeHandler(log, pus, evtSource, func(ctx context.Context,
		chainID string, toHeight int64,
	) error {
		return nil
	})

	// Play events from 3001 to 4000
	ctxWithCancel, cancelFn := context.WithCancel(ctx)

	var ss segment.Unpublished
	var newSnapshotFileHashAt4000 string

	outputSnapshotService := setupSnapshotService(t.TempDir())
	sqlBroker, err := setupSQLBroker(ctx, sqlConfig, outputSnapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
			// Snapshot at every interval; record the data-directory hash at
			// height 4000 and stop the replay once height 5000 is committed.
			if lastCommittedBlockHeight > 0 && lastCommittedBlockHeight%snapshotInterval == 0 {
				ss, err = service.CreateSnapshotAsynchronously(ctx, chainId, lastCommittedBlockHeight)
				require.NoError(t, err)
				waitForSnapshotToComplete(ss)

				if lastCommittedBlockHeight == 4000 {
					newSnapshotFileHashAt4000, err = Md5Hash(ss.UnpublishedSnapshotDataDirectory())
					require.NoError(t, err)
				}

				if lastCommittedBlockHeight == 5000 {
					cancelFn()
				}
			}
		},
		evtSource, puh)
	require.NoError(t, err)

	// context.Canceled is the expected way out of Receive (via cancelFn).
	err = sqlBroker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// The snapshot at 4000 must be identical to the pure event-replay one.
	assert.Equal(t, fromEventHashes[4], newSnapshotFileHashAt4000)

	historyTableDelta := getSnapshotIntervalToHistoryTableDeltaSummary(ctx, sqlConfig.ConnectionConfig,
		expectedHistorySegmentsFromHeights, expectedHistorySegmentsToHeights)

	// Intervals 2-4 were restored/replayed and must match the event replay.
	for i := 2; i < 5; i++ {
		assertSummariesAreEqual(t, fromEventsIntervalToHistoryTableDelta[i], historyTableDelta[i])
	}

	// Intervals 0 and 1 were never loaded, so their history must be empty.
	assertIntervalHistoryIsEmpty(t, historyTableDelta, 0)
	assertIntervalHistoryIsEmpty(t, historyTableDelta, 1)
}
   923  
// TestRestoreFromFullHistorySnapshotAndProcessEvents restores a datanode from
// the full history up to block 2000, replays events through a protocol
// upgrade at height 2500 (migrating the schema at the boundary), then
// continues to height 3000 and checks the snapshot and database state there
// match those produced by a pure event replay.
func TestRestoreFromFullHistorySnapshotAndProcessEvents(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, networkHistoryStore.ResetIndex())

	var err error
	log := logging.NewTestLogger()

	emptyDatabaseAndSetSchemaVersion(0)

	// Fetch the complete history: 2000 blocks ending at height 2000.
	fetched, err := fetchBlocks(ctx, log, networkHistoryStore, goldenSourceHistorySegment[2000].HistorySegmentID, 2000)
	require.NoError(t, err)
	require.Equal(t, int64(2000), fetched)

	snapshotCopyToPath := t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err := networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err := segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	loaded, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, false, false)
	require.NoError(t, err)
	assert.Equal(t, int64(1), loaded.LoadedFromHeight)
	assert.Equal(t, int64(2000), loaded.LoadedToHeight)

	connSource, err := sqlstore.NewTransactionalConnectionSource(ctx, logging.NewTestLogger(), sqlConfig.ConnectionConfig)
	require.NoError(t, err)
	defer connSource.Close()

	ctxWithCancel, cancelFn := context.WithCancel(ctx)

	var snapshotFileHashAfterReloadAt2000AndEventReplayTo3000 string
	outputSnapshotService := setupSnapshotService(t.TempDir())

	evtSource := newTestEventSourceWithProtocolUpdateMessage()

	// On protocol upgrade, publish a segment for the upgrade height.
	puh := networkhistory.NewProtocolUpgradeHandler(log, service.NewProtocolUpgrade(nil, log), evtSource,
		func(ctx context.Context, chainID string, toHeight int64) error {
			return networkhistoryService.CreateAndPublishSegment(ctx, chainID, toHeight)
		})

	// First broker run: replay until the protocol upgrade stops it.
	var lastCommittedBlockHeight int64
	sqlBroker, err := setupSQLBroker(ctx, sqlConfig, outputSnapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, blockHeight int64, snapshotTaken bool) {
			lastCommittedBlockHeight = blockHeight
		},
		evtSource, puh,
	)
	require.NoError(t, err)

	err = sqlBroker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// The upgrade point is at height 2500.
	assert.Equal(t, int64(2500), lastCommittedBlockHeight)

	// Apply the schema migration that accompanies the protocol upgrade.
	err = migrateUpToDatabaseVersion(testMigrationVersionNum)
	require.NoError(t, err)

	// After protocol upgrade restart the broker
	sqlBroker, err = setupSQLBroker(ctx, sqlConfig, outputSnapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
			if lastCommittedBlockHeight > 0 && lastCommittedBlockHeight%snapshotInterval == 0 {
				if lastCommittedBlockHeight == 3000 {
					ss, err := service.CreateSnapshotAsynchronously(ctx, chainId, lastCommittedBlockHeight)
					require.NoError(t, err)
					waitForSnapshotToComplete(ss)

					snapshotFileHashAfterReloadAt2000AndEventReplayTo3000, err = Md5Hash(ss.UnpublishedSnapshotDataDirectory())
					require.NoError(t, err)
					cancelFn()
				}
			}
		},
		evtSource, networkhistory.NewProtocolUpgradeHandler(log, service.NewProtocolUpgrade(nil, log), evtSource,
			func(ctx context.Context, chainID string, toHeight int64) error {
				return nil
			}),
	)
	require.NoError(t, err)

	err = sqlBroker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// The snapshot at 3000 must match the pure event-replay snapshot.
	require.Equal(t, fromEventHashes[3], snapshotFileHashAfterReloadAt2000AndEventReplayTo3000)

	updateAllContinuousAggregateData(ctx)

	databaseSummaryAtBlock3000AfterSnapshotReloadFromBlock2000 := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)

	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[3].currentTableSummaries, databaseSummaryAtBlock3000AfterSnapshotReloadFromBlock2000.currentTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[3].historyTableSummaries, databaseSummaryAtBlock3000AfterSnapshotReloadFromBlock2000.historyTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[3].caggSummaries, databaseSummaryAtBlock3000AfterSnapshotReloadFromBlock2000.caggSummaries)
}
  1026  
// TestRestoreFromFullHistorySnapshotWithIndexesAndOrderTriggersAndProcessEvents
// is the same scenario as TestRestoreFromFullHistorySnapshotAndProcessEvents,
// except the history load is performed with the withIndexesAndOrderTriggers
// flag set to true (third boolean argument of LoadNetworkHistoryIntoDatanode).
// The end state at height 3000 must still match the pure event replay.
func TestRestoreFromFullHistorySnapshotWithIndexesAndOrderTriggersAndProcessEvents(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	require.NoError(t, networkHistoryStore.ResetIndex())

	var err error
	log := logging.NewTestLogger()

	emptyDatabaseAndSetSchemaVersion(0)

	// Fetch the complete history: 2000 blocks ending at height 2000.
	fetched, err := fetchBlocks(ctx, log, networkHistoryStore, goldenSourceHistorySegment[2000].HistorySegmentID, 2000)
	require.NoError(t, err)
	require.Equal(t, int64(2000), fetched)

	snapshotCopyToPath := t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)
	segments, err := networkhistoryService.ListAllHistorySegments()
	require.NoError(t, err)

	chunk, err := segments.MostRecentContiguousHistory()
	require.NoError(t, err)

	// Load with indexes and order triggers enabled (true).
	loaded, err := networkhistoryService.LoadNetworkHistoryIntoDatanode(ctx, chunk, sqlConfig.ConnectionConfig, true, false)
	require.NoError(t, err)
	assert.Equal(t, int64(1), loaded.LoadedFromHeight)
	assert.Equal(t, int64(2000), loaded.LoadedToHeight)

	connSource, err := sqlstore.NewTransactionalConnectionSource(ctx, logging.NewTestLogger(), sqlConfig.ConnectionConfig)
	require.NoError(t, err)
	defer connSource.Close()

	ctxWithCancel, cancelFn := context.WithCancel(ctx)

	var snapshotFileHashAfterReloadAt2000AndEventReplayTo3000 string
	outputSnapshotService := setupSnapshotService(t.TempDir())

	evtSource := newTestEventSourceWithProtocolUpdateMessage()

	// On protocol upgrade, publish a segment for the upgrade height.
	puh := networkhistory.NewProtocolUpgradeHandler(log, service.NewProtocolUpgrade(nil, log), evtSource,
		func(ctx context.Context, chainID string, toHeight int64) error {
			return networkhistoryService.CreateAndPublishSegment(ctx, chainID, toHeight)
		})

	// First broker run: replay until the protocol upgrade stops it.
	var lastCommittedBlockHeight int64
	sqlBroker, err := setupSQLBroker(ctx, sqlConfig, outputSnapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, blockHeight int64, snapshotTaken bool) {
			lastCommittedBlockHeight = blockHeight
		},
		evtSource, puh,
	)
	require.NoError(t, err)

	err = sqlBroker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// The upgrade point is at height 2500.
	assert.Equal(t, int64(2500), lastCommittedBlockHeight)

	// Apply the schema migration that accompanies the protocol upgrade.
	err = migrateUpToDatabaseVersion(testMigrationVersionNum)
	require.NoError(t, err)

	// After protocol upgrade restart the broker
	sqlBroker, err = setupSQLBroker(ctx, sqlConfig, outputSnapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
			if lastCommittedBlockHeight > 0 && lastCommittedBlockHeight%snapshotInterval == 0 {
				if lastCommittedBlockHeight == 3000 {
					ss, err := service.CreateSnapshotAsynchronously(ctx, chainId, lastCommittedBlockHeight)
					require.NoError(t, err)
					waitForSnapshotToComplete(ss)

					snapshotFileHashAfterReloadAt2000AndEventReplayTo3000, err = Md5Hash(ss.UnpublishedSnapshotDataDirectory())
					require.NoError(t, err)
					cancelFn()
				}
			}
		},
		evtSource, networkhistory.NewProtocolUpgradeHandler(log, service.NewProtocolUpgrade(nil, log), evtSource,
			func(ctx context.Context, chainID string, toHeight int64) error {
				return nil
			}),
	)
	require.NoError(t, err)

	err = sqlBroker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}

	// The snapshot at 3000 must match the pure event-replay snapshot.
	require.Equal(t, fromEventHashes[3], snapshotFileHashAfterReloadAt2000AndEventReplayTo3000)

	updateAllContinuousAggregateData(ctx)

	databaseSummaryAtBlock3000AfterSnapshotReloadFromBlock2000 := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)

	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[3].currentTableSummaries, databaseSummaryAtBlock3000AfterSnapshotReloadFromBlock2000.currentTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[3].historyTableSummaries, databaseSummaryAtBlock3000AfterSnapshotReloadFromBlock2000.historyTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[3].caggSummaries, databaseSummaryAtBlock3000AfterSnapshotReloadFromBlock2000.caggSummaries)
}
  1129  
  1130  func fetchBlocks(ctx context.Context, log *logging.Logger, st *store.Store, rootSegmentID string, numBlocksToFetch int64) (int64, error) {
  1131  	var err error
  1132  	var fetched int64
  1133  	for i := 0; i < 5; i++ {
  1134  		ctxWithTimeout, cancelFn := context.WithTimeout(ctx, 10*time.Second)
  1135  
  1136  		fetched, err = networkhistory.FetchHistoryBlocks(ctxWithTimeout, log.Infof, rootSegmentID,
  1137  			func(ctx context.Context, historySegmentID string) (networkhistory.FetchResult, error) {
  1138  				segment, err := st.FetchHistorySegment(ctx, historySegmentID)
  1139  				if err != nil {
  1140  					return networkhistory.FetchResult{}, err
  1141  				}
  1142  				return networkhistory.FromSegmentIndexEntry(segment), nil
  1143  			}, numBlocksToFetch)
  1144  		cancelFn()
  1145  		if err == nil {
  1146  			return fetched, nil
  1147  		}
  1148  	}
  1149  
  1150  	return 0, fmt.Errorf("failed to fetch blocks:%w", err)
  1151  }
  1152  
// TestRollingBackToHeightAcrossSchemaUpdateBoundary replays events to height
// 5000 (crossing the protocol upgrade), fetches the full 5000 blocks of
// history, then rolls the datanode back to height 2000 — a height before the
// upgrade — and checks both the database state and the remaining history
// segments match expectations.
func TestRollingBackToHeightAcrossSchemaUpdateBoundary(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	log := logging.NewTestLogger()

	require.NoError(t, networkHistoryStore.ResetIndex())
	emptyDatabaseAndSetSchemaVersion(highestMigrationNumber)

	snapshotCopyToPath := t.TempDir()
	snapshotService := setupSnapshotService(snapshotCopyToPath)

	ctxWithCancel, cancelFn := context.WithCancel(ctx)

	evtSource := newTestEventSourceWithProtocolUpdateMessage()

	pus := service.NewProtocolUpgrade(nil, log)
	puh := networkhistory.NewProtocolUpgradeHandler(log, pus, evtSource, func(ctx context.Context, chainID string,
		toHeight int64,
	) error {
		return nil
	})

	// Run events to height 5000
	broker, err := setupSQLBroker(ctx, sqlConfig, snapshotService,
		func(ctx context.Context, service *snapshot.Service, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
			if lastCommittedBlockHeight == 5000 {
				cancelFn()
			}
		},
		evtSource, puh)
	require.NoError(t, err)

	// context.Canceled is the expected way out of Receive (via cancelFn).
	err = broker.Receive(ctxWithCancel)
	if err != nil && !errors.Is(err, context.Canceled) {
		require.NoError(t, err)
	}
	updateAllContinuousAggregateData(ctx)

	fetched, err := fetchBlocks(ctx, log, networkHistoryStore, goldenSourceHistorySegment[5000].HistorySegmentID, 5000)
	require.NoError(t, err)
	require.Equal(t, int64(5000), fetched)

	snapshotCopyToPath = t.TempDir()

	inputSnapshotService := setupSnapshotService(snapshotCopyToPath)

	networkhistoryService := setupNetworkHistoryService(ctx, log, inputSnapshotService, networkHistoryStore, snapshotCopyToPath)

	// Rollback to a height pre protocol upgrade
	err = networkhistoryService.RollbackToHeight(ctx, log, 2000)
	require.NoError(t, err)

	// The post-rollback state must equal the golden summaries captured at 2000.
	dbSummary := getDatabaseDataSummary(ctx, sqlConfig.ConnectionConfig)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[1].currentTableSummaries, dbSummary.currentTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[1].historyTableSummaries, dbSummary.historyTableSummaries)
	assertSummariesAreEqual(t, fromEventsDatabaseSummaries[1].caggSummaries, dbSummary.caggSummaries)

	// Only the segments at or below the rollback height should remain.
	historySegments, err := networkHistoryStore.ListAllIndexEntriesMostRecentFirst()
	require.NoError(t, err)
	assert.Equal(t, 2, len(historySegments))
	assert.Equal(t, int64(2000), historySegments[0].HeightTo)
	assert.Equal(t, int64(1000), historySegments[1].HeightTo)
}
  1217  
  1218  func setupNetworkHistoryService(ctx context.Context, log *logging.Logger, inputSnapshotService *snapshot.Service, store *store.Store,
  1219  	snapshotCopyToPath string,
  1220  ) *networkhistory.Service {
  1221  	cfg := networkhistory.NewDefaultConfig()
  1222  	cfg.Publish = false
  1223  
  1224  	datanodeConfig := config2.NewDefaultConfig()
  1225  
  1226  	networkHistoryService, err := networkhistory.New(ctx, log, chainID, cfg, networkHistoryConnPool,
  1227  		inputSnapshotService, store, datanodeConfig.API.Port, snapshotCopyToPath)
  1228  	if err != nil {
  1229  		panic(err)
  1230  	}
  1231  
  1232  	return networkHistoryService
  1233  }
  1234  
// sqlStoreBroker is the minimal broker behaviour these tests rely on:
// Receive blocks consuming events until the context is cancelled or the
// event source is exhausted.
type sqlStoreBroker interface {
	Receive(ctx context.Context) error
}
  1238  
  1239  func emptyDatabaseAndSetSchemaVersion(schemaVersion int64) {
  1240  	// For these we need a totally fresh database every time to ensure we model as closely as
  1241  	// possible what happens in practice
  1242  	var err error
  1243  	var poolConfig *pgxpool.Config
  1244  
  1245  	poolConfig, err = sqlConfig.ConnectionConfig.GetPoolConfig()
  1246  	if err != nil {
  1247  		panic(fmt.Errorf("failed to get pool config: %w", err))
  1248  	}
  1249  
  1250  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
  1251  
  1252  	if _, err = db.Exec(`SELECT alter_job(job_id, scheduled => false) FROM timescaledb_information.jobs WHERE proc_name = 'policy_refresh_continuous_aggregate'`); err != nil {
  1253  		panic(fmt.Errorf("failed to stop continuous aggregates: %w", err))
  1254  	}
  1255  	db.Close()
  1256  
  1257  	for i := 0; i < 5; i++ {
  1258  		err = sqlstore.WipeDatabaseAndMigrateSchemaToVersion(logging.NewTestLogger(), sqlConfig.ConnectionConfig, schemaVersion, sqlFs, false)
  1259  		if err == nil {
  1260  			break
  1261  		}
  1262  		time.Sleep(5 * time.Second)
  1263  	}
  1264  
  1265  	if err != nil {
  1266  		panic(err)
  1267  	}
  1268  }
  1269  
  1270  func panicIfHistorySegmentIdsNotEqual(actual, expected string, snapshots []segment.Unpublished) {
  1271  	if expected != actual {
  1272  		snapshotPaths := ""
  1273  		for _, sn := range snapshots {
  1274  			snapshotPaths += "," + sn.ZipFileName()
  1275  		}
  1276  
  1277  		panic(fmt.Errorf("history segment ids are not equal, expected: %s  actual: %s\n"+
  1278  			"If the database schema has changed or event file been updated the history segment ids "+
  1279  			"will need updating.  Snapshot files: %s", expected, actual, snapshotPaths))
  1280  	}
  1281  }
  1282  
  1283  func assertIntervalHistoryIsEmpty(t *testing.T, historyTableDelta []map[string]tableDataSummary, interval int) {
  1284  	t.Helper()
  1285  	totalRowCount := 0
  1286  	for _, summary := range historyTableDelta[interval] {
  1287  		totalRowCount += summary.rowCount
  1288  	}
  1289  	assert.Equal(t, 0, totalRowCount, "expected interval history to be empty but found %d rows", totalRowCount)
  1290  }
  1291  
  1292  func setupSnapshotService(snapshotCopyToPath string) *snapshot.Service {
  1293  	snapshotServiceCfg := snapshot.NewDefaultConfig()
  1294  	snapshotService, err := snapshot.NewSnapshotService(logging.NewTestLogger(), snapshotServiceCfg,
  1295  		networkHistoryConnPool, networkHistoryStore, snapshotCopyToPath, migrateUpToDatabaseVersion,
  1296  		migrateDownToDatabaseVersion)
  1297  	if err != nil {
  1298  		panic(err)
  1299  	}
  1300  
  1301  	return snapshotService
  1302  }
  1303  
// ProtocolUpgradeHandler reacts to protocol-upgrade events observed by the
// broker and reports whether an upgrade has started.
type ProtocolUpgradeHandler interface {
	OnProtocolUpgradeEvent(ctx context.Context, chainID string, lastCommittedBlockHeight int64) error
	GetProtocolUpgradeStarted() bool
}
  1308  
  1309  func setupSQLBroker(ctx context.Context, testDbConfig sqlstore.Config, snapshotService *snapshot.Service,
  1310  	onBlockCommitted func(ctx context.Context, service *snapshot.Service, chainId string,
  1311  		lastCommittedBlockHeight int64, snapshotTaken bool), evtSource eventSource, protocolUpdateHandler ProtocolUpgradeHandler,
  1312  ) (sqlStoreBroker, error) {
  1313  	transactionalConnectionSource, err := sqlstore.NewTransactionalConnectionSource(ctx, logging.NewTestLogger(), testDbConfig.ConnectionConfig)
  1314  	if err != nil {
  1315  		return nil, err
  1316  	}
  1317  	go func() {
  1318  		for range ctx.Done() {
  1319  			transactionalConnectionSource.Close()
  1320  		}
  1321  	}()
  1322  
  1323  	candlesV2Config := candlesv2.NewDefaultConfig()
  1324  	cfg := service.NewDefaultConfig()
  1325  	subscribers := start.SQLSubscribers{}
  1326  	subscribers.CreateAllStores(ctx, logging.NewTestLogger(), transactionalConnectionSource, candlesV2Config.CandleStore)
  1327  	err = subscribers.SetupServices(ctx, logging.NewTestLogger(), cfg, candlesV2Config)
  1328  	if err != nil {
  1329  		return nil, err
  1330  	}
  1331  
  1332  	subscribers.SetupSQLSubscribers()
  1333  
  1334  	blockStore := sqlstore.NewBlocks(transactionalConnectionSource)
  1335  	if err != nil {
  1336  		return nil, fmt.Errorf("failed to create block store: %w", err)
  1337  	}
  1338  
  1339  	config := broker.NewDefaultConfig()
  1340  
  1341  	sqlBroker := broker.NewSQLStoreBroker(logging.NewTestLogger(), config, chainID, evtSource,
  1342  		transactionalConnectionSource, blockStore, func(ctx context.Context, chainId string, lastCommittedBlockHeight int64, snapshotTaken bool) {
  1343  			onBlockCommitted(ctx, snapshotService, chainId, lastCommittedBlockHeight, snapshotTaken)
  1344  		}, protocolUpdateHandler, subscribers.GetSQLSubscribers(),
  1345  	)
  1346  	return sqlBroker, nil
  1347  }
  1348  
// eventSource abstracts the broker event stream wrapped by TestEventSource:
// Listen starts the source, Receive yields events and errors on channels.
type eventSource interface {
	Listen() error
	Receive(ctx context.Context) (<-chan events.Event, <-chan error)
}
  1353  
// TestEventSource wraps a file-backed event source and gives tests a hook
// (onEvent) that is invoked for each event before it is forwarded downstream.
type TestEventSource struct {
	fileSource eventSource                               // underlying file-based source the events are replayed from
	onEvent    func(events.Event, chan<- events.Event)   // called per event with the downstream channel before forwarding
}
  1358  
  1359  func newTestEventSource(onEvent func(events.Event, chan<- events.Event)) (*TestEventSource, error) {
  1360  	rawEvtSource, err := broker.NewBufferFilesEventSource(eventsDir, 0, 0, chainID)
  1361  	if err != nil {
  1362  		return nil, err
  1363  	}
  1364  	evtSource := broker.NewDeserializer(rawEvtSource)
  1365  
  1366  	return &TestEventSource{
  1367  		fileSource: evtSource,
  1368  		onEvent:    onEvent,
  1369  	}, nil
  1370  }
  1371  
  1372  func (e *TestEventSource) Listen() error {
  1373  	e.fileSource.Listen()
  1374  	return nil
  1375  }
  1376  
  1377  func (e *TestEventSource) Receive(ctx context.Context) (<-chan events.Event, <-chan error) {
  1378  	sourceEventCh, sourceErrCh := e.fileSource.Receive(ctx)
  1379  
  1380  	sinkEventCh := make(chan events.Event)
  1381  	sinkErrCh := make(chan error)
  1382  
  1383  	go func() {
  1384  		for {
  1385  			select {
  1386  			case <-ctx.Done():
  1387  				return
  1388  			case err := <-sourceErrCh:
  1389  				sinkErrCh <- err
  1390  			case event := <-sourceEventCh:
  1391  				e.onEvent(event, sinkEventCh)
  1392  				sinkEventCh <- event
  1393  			}
  1394  		}
  1395  	}()
  1396  
  1397  	return sinkEventCh, sinkErrCh
  1398  }
  1399  
// Send is a no-op; it exists only so TestEventSource satisfies interfaces
// that expect an event sender — events come solely from the file source.
func (e *TestEventSource) Send(evt events.Event) error {
	return nil
}
  1403  
// tableDataSummary captures a table's (or view's) row count and an md5 hash
// of its ordered contents, used to compare database states across runs.
type tableDataSummary struct {
	tableName string // name of the table or view summarised
	rowCount  int    // total number of rows
	dataHash  string // md5 over all rows in a deterministic order; empty when rowCount == 0
}
  1409  
  1410  func assertSummariesAreEqual(t *testing.T, expected map[string]tableDataSummary, actual map[string]tableDataSummary) {
  1411  	t.Helper()
  1412  	if len(expected) != len(actual) {
  1413  		require.Equalf(t, len(expected), len(actual), "expected table count: %d actual: %d", len(expected), len(actual))
  1414  	}
  1415  
  1416  	for k, v := range expected {
  1417  		if v.rowCount != actual[k].rowCount {
  1418  			assert.Equalf(t, v.rowCount, actual[k].rowCount, "expected table row count for %s: %d actual row count: %d", k, v.rowCount, actual[k].rowCount)
  1419  		}
  1420  
  1421  		if v.dataHash != actual[k].dataHash {
  1422  			assert.Equalf(t, v.dataHash, actual[k].dataHash, "expected data hash for %s: %s actual data hash: %s", k, v.dataHash, actual[k].dataHash)
  1423  		}
  1424  	}
  1425  }
  1426  
// databaseSummary groups per-table summaries for the whole database, split by
// table kind, plus the schema metadata the summaries were derived from.
type databaseSummary struct {
	currentTableSummaries map[string]tableDataSummary // non-hypertable (current state) tables
	historyTableSummaries map[string]tableDataSummary // timescale hypertables (history)
	caggSummaries         map[string]tableDataSummary // continuous aggregate views
	dbMetaData            snapshot.DatabaseMetadata   // metadata the table/cagg lists came from
}
  1433  
  1434  func getDatabaseDataSummary(ctx context.Context, connConfig sqlstore.ConnectionConfig) databaseSummary {
  1435  	conn, err := pgxpool.Connect(ctx, connConfig.GetConnectionString())
  1436  	if err != nil {
  1437  		panic(err)
  1438  	}
  1439  
  1440  	currentStateDataSummaries := map[string]tableDataSummary{}
  1441  	historyStateDataSummaries := map[string]tableDataSummary{}
  1442  	dbMetaData, err := snapshot.NewDatabaseMetaData(ctx, conn)
  1443  	if err != nil {
  1444  		panic(err)
  1445  	}
  1446  
  1447  	for table, meta := range dbMetaData.TableNameToMetaData {
  1448  		summary := getTableOrViewSummary(ctx, conn, table, meta.SortOrder)
  1449  
  1450  		if meta.Hypertable {
  1451  			historyStateDataSummaries[table] = summary
  1452  		} else {
  1453  			currentStateDataSummaries[table] = summary
  1454  		}
  1455  	}
  1456  
  1457  	// No sensible way to get the order by from database metadata so it is hardcoded here, will need to be added
  1458  	// to if new CAGGS are added
  1459  	viewNameToGroupBy := map[string]string{
  1460  		"conflated_balances":       "account_id, bucket",
  1461  		"conflated_margin_levels":  "account_id, bucket",
  1462  		"conflated_positions":      "market_id, party_id, bucket",
  1463  		"trades_candle_15_minutes": "market_id, period_start",
  1464  		"trades_candle_1_day":      "market_id, period_start",
  1465  		"trades_candle_1_hour":     "market_id, period_start",
  1466  		"trades_candle_1_minute":   "market_id, period_start",
  1467  		"trades_candle_5_minutes":  "market_id, period_start",
  1468  		"trades_candle_6_hours":    "market_id, period_start",
  1469  		"trades_candle_30_minutes": "market_id, period_start",
  1470  		"trades_candle_4_hours":    "market_id, period_start",
  1471  		"trades_candle_8_hours":    "market_id, period_start",
  1472  		"trades_candle_12_hours":   "market_id, period_start",
  1473  		"trades_candle_7_days":     "market_id, period_start",
  1474  	}
  1475  
  1476  	caggSummaries := map[string]tableDataSummary{}
  1477  	for _, caggMeta := range dbMetaData.ContinuousAggregatesMetaData {
  1478  		summary := getTableOrViewSummary(ctx, conn, caggMeta.Name, viewNameToGroupBy[caggMeta.Name])
  1479  		caggSummaries[caggMeta.Name] = summary
  1480  	}
  1481  
  1482  	return databaseSummary{
  1483  		historyTableSummaries: historyStateDataSummaries, currentTableSummaries: currentStateDataSummaries,
  1484  		caggSummaries: caggSummaries,
  1485  		dbMetaData:    dbMetaData,
  1486  	}
  1487  }
  1488  
  1489  func getTableOrViewSummary(ctx context.Context, conn *pgxpool.Pool, table string, sortOrder string) tableDataSummary {
  1490  	summary := tableDataSummary{tableName: table}
  1491  	err := conn.QueryRow(ctx, fmt.Sprintf("select count(*) from %s", table)).Scan(&summary.rowCount)
  1492  	if err != nil {
  1493  		panic(err)
  1494  	}
  1495  
  1496  	if summary.rowCount > 0 {
  1497  		err = conn.QueryRow(ctx, fmt.Sprintf("SELECT md5(CAST((array_agg(f.* order by %s))AS text)) FROM %s f; ",
  1498  			sortOrder, table)).Scan(&summary.dataHash)
  1499  		if err != nil {
  1500  			panic(err)
  1501  		}
  1502  	}
  1503  	return summary
  1504  }
  1505  
  1506  func getSnapshotIntervalToHistoryTableDeltaSummary(ctx context.Context,
  1507  	connConfig sqlstore.ConnectionConfig, expectedHistorySegmentsFromHeights []int64,
  1508  	expectedHistorySegmentsToHeights []int64,
  1509  ) []map[string]tableDataSummary {
  1510  	conn, err := pgxpool.Connect(ctx, connConfig.GetConnectionString())
  1511  	if err != nil {
  1512  		panic(err)
  1513  	}
  1514  
  1515  	dbMetaData, err := snapshot.NewDatabaseMetaData(ctx, conn)
  1516  	if err != nil {
  1517  		panic(err)
  1518  	}
  1519  
  1520  	var snapshotNumToHistoryTableSummary []map[string]tableDataSummary
  1521  
  1522  	for i := 0; i < len(expectedHistorySegmentsFromHeights); i++ {
  1523  		fromHeight := expectedHistorySegmentsFromHeights[i]
  1524  		toHeight := expectedHistorySegmentsToHeights[i]
  1525  
  1526  		whereClause := fmt.Sprintf("Where vega_time >= (SELECT vega_time from blocks where height = %d) and  vega_time <= (SELECT vega_time from blocks where height = %d)",
  1527  			fromHeight, toHeight)
  1528  
  1529  		intervalHistoryTableSummary := map[string]tableDataSummary{}
  1530  		for table, meta := range dbMetaData.TableNameToMetaData {
  1531  			if meta.Hypertable {
  1532  				summary := tableDataSummary{tableName: table}
  1533  				err := conn.QueryRow(ctx, fmt.Sprintf("select count(*) from %s %s", table, whereClause)).Scan(&summary.rowCount)
  1534  				if err != nil {
  1535  					panic(err)
  1536  				}
  1537  
  1538  				if summary.rowCount > 0 {
  1539  					err = conn.QueryRow(ctx, fmt.Sprintf("SELECT md5(CAST((array_agg(f.* order by %s))AS text)) FROM %s f %s; ",
  1540  						meta.SortOrder, table, whereClause)).Scan(&summary.dataHash)
  1541  					if err != nil {
  1542  						panic(err)
  1543  					}
  1544  				}
  1545  
  1546  				intervalHistoryTableSummary[table] = summary
  1547  			}
  1548  		}
  1549  		snapshotNumToHistoryTableSummary = append(snapshotNumToHistoryTableSummary, intervalHistoryTableSummary)
  1550  	}
  1551  	return snapshotNumToHistoryTableSummary
  1552  }
  1553  
  1554  func waitForSnapshotToComplete(sf segment.Unpublished) {
  1555  	for {
  1556  		time.Sleep(10 * time.Millisecond)
  1557  		// wait for snapshot current  file
  1558  		_, err := os.Stat(sf.UnpublishedSnapshotDataDirectory())
  1559  		if err != nil {
  1560  			if errors.Is(err, os.ErrNotExist) {
  1561  				continue
  1562  			} else {
  1563  				panic(err)
  1564  			}
  1565  		}
  1566  
  1567  		// wait for snapshot data dump in progress file to be removed
  1568  
  1569  		_, err = os.Stat(sf.InProgressFilePath())
  1570  		if err != nil {
  1571  			if errors.Is(err, os.ErrNotExist) {
  1572  				break
  1573  			} else {
  1574  				panic(err)
  1575  			}
  1576  		} else {
  1577  			continue
  1578  		}
  1579  	}
  1580  }
  1581  
  1582  func decompressEventFile() {
  1583  	sourceFile, err := os.Open(compressedEventsFile)
  1584  	if err != nil {
  1585  		panic(err)
  1586  	}
  1587  
  1588  	zr, err := gzip.NewReader(sourceFile)
  1589  	if err != nil {
  1590  		panic(err)
  1591  	}
  1592  
  1593  	fileToWrite, err := os.Create(eventsFile)
  1594  	if err != nil {
  1595  		panic(err)
  1596  	}
  1597  	if _, err := io.Copy(fileToWrite, zr); err != nil {
  1598  		panic(err)
  1599  	}
  1600  }
  1601  
// setupTestSQLMigrations copies the datanode's SQL migrations into a fresh
// temp directory (stored in the package-level testMigrationsDir), appends an
// extra test migration read from testMigrationSQL, and returns that test
// migration's version number together with an fs.FS rooted at the temp
// directory. Panics on any error, as is the convention for these test
// helpers.
func setupTestSQLMigrations() (int64, fs.FS) {
	sourceMigrationsDir, err := os.Getwd()
	if err != nil {
		panic(err)
	}

	// The real migrations live in the sibling sqlstore/migrations directory.
	sourceMigrationsDir, _ = filepath.Split(sourceMigrationsDir)
	sourceMigrationsDir = filepath.Join(sourceMigrationsDir, "sqlstore", "migrations")

	// Deliberate assignment (not :=) to the package-level testMigrationsDir,
	// which the migrate up/down helpers read later.
	testMigrationsDir, err = os.MkdirTemp("", "migrations")
	if err != nil {
		panic(err)
	}

	if err := os.Mkdir(filepath.Join(testMigrationsDir, sqlstore.SQLMigrationsDir), fs.ModePerm); err != nil {
		panic(fmt.Errorf("failed to create migrations dir: %w", err))
	}

	// Copy every .sql migration across, tracking the highest version number
	// seen so the test migration can be numbered one above it.
	var highestMigrationNumber int64
	err = filepath.Walk(sourceMigrationsDir, func(path string, info os.FileInfo, err error) error {
		if err != nil || (info != nil && info.IsDir()) {
			return nil //nolint:nilerr
		}
		if strings.HasSuffix(info.Name(), ".sql") {
			// Migration files are named <version>_<name>.sql.
			split := strings.Split(info.Name(), "_")
			if len(split) < 2 {
				return errors.New("expected sql filename of form <version>_<name>.sql")
			}

			migrationNum, err := strconv.Atoi(split[0])
			if err != nil {
				return fmt.Errorf("expected first part of file name to be integer, is %s", split[0])
			}

			if int64(migrationNum) > highestMigrationNumber {
				highestMigrationNumber = int64(migrationNum)
			}

			data, err := os.ReadFile(filepath.Join(sourceMigrationsDir, info.Name()))
			if err != nil {
				return fmt.Errorf("failed to read file:%w", err)
			}

			err = os.WriteFile(filepath.Join(testMigrationsDir, sqlstore.SQLMigrationsDir, info.Name()), data, fs.ModePerm)
			if err != nil {
				return fmt.Errorf("failed to write file:%w", err)
			}
		}
		return nil
	})

	if err != nil {
		panic(err)
	}

	// Create a file with a new migration
	sql, err := os.ReadFile(testMigrationSQL)
	if err != nil {
		panic(err)
	}

	testMigrationVersionNum := highestMigrationNumber + 1
	err = os.WriteFile(filepath.Join(testMigrationsDir, sqlstore.SQLMigrationsDir,
		fmt.Sprintf("%04d_testmigration.sql", testMigrationVersionNum)), sql, fs.ModePerm)
	if err != nil {
		panic(err)
	}

	return testMigrationVersionNum, os.DirFS(testMigrationsDir)
}
  1672  
  1673  func migrateUpToDatabaseVersion(version int64) error {
  1674  	poolConfig, err := sqlConfig.ConnectionConfig.GetPoolConfig()
  1675  	if err != nil {
  1676  		return fmt.Errorf("failed to get pool config:%w", err)
  1677  	}
  1678  
  1679  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
  1680  	defer db.Close()
  1681  
  1682  	goose.SetBaseFS(nil)
  1683  	err = goose.UpTo(db, filepath.Join(testMigrationsDir, sqlstore.SQLMigrationsDir), version)
  1684  	if err != nil {
  1685  		return fmt.Errorf("failed to migrate up to version %d:%w", version, err)
  1686  	}
  1687  
  1688  	return nil
  1689  }
  1690  
  1691  func migrateDownToDatabaseVersion(version int64) error {
  1692  	poolConfig, err := sqlConfig.ConnectionConfig.GetPoolConfig()
  1693  	if err != nil {
  1694  		return fmt.Errorf("failed to get pool config:%w", err)
  1695  	}
  1696  
  1697  	db := stdlib.OpenDB(*poolConfig.ConnConfig)
  1698  	defer db.Close()
  1699  
  1700  	goose.SetBaseFS(nil)
  1701  	err = goose.DownTo(db, filepath.Join(testMigrationsDir, sqlstore.SQLMigrationsDir), version)
  1702  	if err != nil {
  1703  		return fmt.Errorf("failed to migrate down to version %d:%w", version, err)
  1704  	}
  1705  
  1706  	return nil
  1707  }
  1708  
// newTestEventSourceWithProtocolUpdateMessage returns a TestEventSource that
// injects a ProtocolUpgradeStarted event into the stream immediately before
// the begin-block event that follows block height 2500, simulating a
// protocol upgrade mid-stream. Panics on any error.
func newTestEventSourceWithProtocolUpdateMessage() *TestEventSource {
	// currentBlock tracks the most recently begun block; guarded by m
	// because the callback may run concurrently with other readers.
	var currentBlock *entities.Block
	var m sync.RWMutex
	evtSource, err := newTestEventSource(func(e events.Event, evtsCh chan<- events.Event) {
		if e == nil {
			return
		}
		var err error
		switch e.Type() {
		case events.EndBlockEvent:
			// No action needed at end of block.

		case events.BeginBlockEvent:
			m.Lock()
			// Emit the upgrade event once, just after block 2500 has been
			// seen and before recording the next block.
			if currentBlock != nil && currentBlock.Height == 2500 {
				evtsCh <- events.NewProtocolUpgradeStarted(context.Background(), eventsv1.ProtocolUpgradeStarted{
					LastBlockHeight: uint64(currentBlock.Height),
				})
			}
			beginBlock := e.(entities.BeginBlockEvent)
			currentBlock, err = entities.BlockFromBeginBlock(beginBlock)
			m.Unlock()
			// Panic outside the lock so a failure does not leave m held.
			if err != nil {
				panic(err)
			}
		}
	})
	if err != nil {
		panic(err)
	}
	return evtSource
}
  1740  
  1741  func Md5Hash(dir string) (string, error) {
  1742  	hash := md5.New()
  1743  	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
  1744  		if err != nil {
  1745  			return err
  1746  		}
  1747  
  1748  		if info.IsDir() {
  1749  			return nil
  1750  		}
  1751  
  1752  		file, err := os.Open(path)
  1753  		if err != nil {
  1754  			return err
  1755  		}
  1756  		defer file.Close()
  1757  
  1758  		_, err = io.Copy(hash, file)
  1759  		if err != nil {
  1760  			return err
  1761  		}
  1762  
  1763  		return nil
  1764  	})
  1765  
  1766  	return hex.EncodeToString(hash.Sum(nil)), nil
  1767  }