github.com/aakash4dev/cometbft@v0.38.2/state/indexer/sink/psql/psql_test.go (about)

     1  package psql
     2  
import (
	"context"
	"database/sql"
	"errors"
	"flag"
	"fmt"
	"log"
	"os"
	"os/signal"
	"testing"
	"time"

	"github.com/adlio/schema"
	"github.com/cosmos/gogoproto/proto"
	"github.com/ory/dockertest"
	"github.com/ory/dockertest/docker"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/aakash4dev/cometbft/abci/types"
	tmlog "github.com/aakash4dev/cometbft/libs/log"
	"github.com/aakash4dev/cometbft/state/txindex"
	"github.com/aakash4dev/cometbft/types"

	// Register the Postgres database driver.
	_ "github.com/lib/pq"
)
    29  
var (
	// doPauseAtExit, when set via -pause-at-exit, makes TestMain block at
	// teardown until the process receives SIGINT, so the database container
	// can be inspected before it is purged.
	doPauseAtExit = flag.Bool("pause-at-exit", false,
		"If true, pause the test until interrupted at shutdown, to allow debugging")

	// A hook that test cases can call to obtain the shared database instance
	// used for testing the sink. This is initialized in TestMain (see below).
	testDB func() *sql.DB
)
    38  
const (
	// Connection settings for the dockerized PostgreSQL instance started in
	// TestMain. dsn is a fmt.Sprintf template filled with user, password,
	// host port, and database name.
	user     = "postgres"
	password = "secret"
	port     = "5432"
	dsn      = "postgres://%s:%s@localhost:%s/%s?sslmode=disable"
	dbName   = "postgres"
	chainID  = "test-chainID" // chain ID the sink under test records with each row

	// Names of SQL views created by the indexing schema.
	viewBlockEvents = "block_events"
	viewTxEvents    = "tx_events"
)
    50  
// TestMain starts a disposable PostgreSQL container with dockertest, waits
// for it to accept connections, installs the indexing schema, and publishes
// the shared *sql.DB through the testDB hook before running the test suite.
// The container is purged on exit unless -pause-at-exit is set, in which
// case teardown waits for SIGINT first.
func TestMain(m *testing.M) {
	flag.Parse()

	// Set up docker and start a container running PostgreSQL.
	// DOCKER_URL may be empty, in which case dockertest uses its default.
	pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL"))
	if err != nil {
		log.Fatalf("Creating docker pool: %v", err)
	}

	resource, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "postgres",
		Tag:        "13",
		Env: []string{
			"POSTGRES_USER=" + user,
			"POSTGRES_PASSWORD=" + password,
			"POSTGRES_DB=" + dbName,
			"listen_addresses = '*'",
		},
		ExposedPorts: []string{port},
	}, func(config *docker.HostConfig) {
		// set AutoRemove to true so that stopped container goes away by itself
		config.AutoRemove = true
		config.RestartPolicy = docker.RestartPolicy{
			Name: "no",
		}
	})
	if err != nil {
		log.Fatalf("Starting docker pool: %v", err)
	}

	// With -pause-at-exit the container must outlive the pause, so the
	// expiration timer is skipped in that case.
	if *doPauseAtExit {
		log.Print("Pause at exit is enabled, containers will not expire")
	} else {
		const expireSeconds = 60
		_ = resource.Expire(expireSeconds)
		log.Printf("Container expiration set to %d seconds", expireSeconds)
	}

	// Connect to the database, clear any leftover data, and install the
	// indexing schema. Retry until the container is ready for connections.
	conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName)
	var db *sql.DB

	if err := pool.Retry(func() error {
		sink, err := NewEventSink(conn, chainID)
		if err != nil {
			return err
		}
		db = sink.DB() // set global for test use
		return db.Ping()
	}); err != nil {
		log.Fatalf("Connecting to database: %v", err)
	}

	if err := resetDatabase(db); err != nil {
		log.Fatalf("Flushing database: %v", err)
	}

	sm, err := readSchema()
	if err != nil {
		log.Fatalf("Reading schema: %v", err)
	}
	migrator := schema.NewMigrator()
	if err := migrator.Apply(db, sm); err != nil {
		log.Fatalf("Applying schema: %v", err)
	}

	// Set up the hook for tests to get the shared database handle.
	testDB = func() *sql.DB { return db }

	// Run the selected test cases.
	code := m.Run()

	// Clean up and shut down the database container.
	if *doPauseAtExit {
		log.Print("Testing complete, pausing for inspection. Send SIGINT to resume teardown")
		waitForInterrupt()
		log.Print("(resuming)")
	}
	log.Print("Shutting down database")
	if err := pool.Purge(resource); err != nil {
		log.Printf("WARNING: Purging pool failed: %v", err)
	}
	if err := db.Close(); err != nil {
		log.Printf("WARNING: Closing database failed: %v", err)
	}

	os.Exit(code)
}
   140  
// TestIndexing exercises the postgres event sink end to end against the
// shared test database: indexing block events, indexing transaction events,
// and driving the sink through a txindex.IndexerService fed by the event bus.
func TestIndexing(t *testing.T) {
	t.Run("IndexBlockEvents", func(t *testing.T) {
		indexer := &EventSink{store: testDB(), chainID: chainID}
		require.NoError(t, indexer.IndexBlockEvents(newTestBlockEvents()))

		verifyBlock(t, 1)
		verifyBlock(t, 2)

		// The postgres sink intentionally does not support lookups or
		// searches; these calls must report "not supported" errors.
		verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(1) })
		verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) })

		verifyNotImplemented(t, "block search", func() (bool, error) {
			v, err := indexer.SearchBlockEvents(context.Background(), nil)
			return v != nil, err
		})

		require.NoError(t, verifyTimeStamp(tableBlocks))

		// Attempting to reindex the same events should gracefully succeed.
		require.NoError(t, indexer.IndexBlockEvents(newTestBlockEvents()))
	})

	t.Run("IndexTxEvents", func(t *testing.T) {
		indexer := &EventSink{store: testDB(), chainID: chainID}

		txResult := txResultWithEvents([]abci.Event{
			makeIndexedEvent("account.number", "1"),
			makeIndexedEvent("account.owner", "Ivan"),
			makeIndexedEvent("account.owner", "Yulieta"),

			// An event with an empty type; included to exercise how the
			// sink handles attributes outside a named event type.
			{Type: "", Attributes: []abci.EventAttribute{
				{
					Key:   "not_allowed",
					Value: "Vlad",
					Index: true,
				},
			}},
		})
		require.NoError(t, indexer.IndexTxEvents([]*abci.TxResult{txResult}))

		// The stored result must round-trip through the database intact.
		txr, err := loadTxResult(types.Tx(txResult.Tx).Hash())
		require.NoError(t, err)
		assert.Equal(t, txResult, txr)

		require.NoError(t, verifyTimeStamp(tableTxResults))
		require.NoError(t, verifyTimeStamp(viewTxEvents))

		verifyNotImplemented(t, "getTxByHash", func() (bool, error) {
			txr, err := indexer.GetTxByHash(types.Tx(txResult.Tx).Hash())
			return txr != nil, err
		})
		verifyNotImplemented(t, "tx search", func() (bool, error) {
			txr, err := indexer.SearchTxEvents(context.Background(), nil)
			return txr != nil, err
		})

		// try to insert the duplicate tx events.
		err = indexer.IndexTxEvents([]*abci.TxResult{txResult})
		require.NoError(t, err)
	})

	t.Run("IndexerService", func(t *testing.T) {
		indexer := &EventSink{store: testDB(), chainID: chainID}

		// event bus
		eventBus := types.NewEventBus()
		err := eventBus.Start()
		require.NoError(t, err)
		t.Cleanup(func() {
			if err := eventBus.Stop(); err != nil {
				t.Error(err)
			}
		})

		service := txindex.NewIndexerService(indexer.TxIndexer(), indexer.BlockIndexer(), eventBus, true)
		service.SetLogger(tmlog.TestingLogger())
		err = service.Start()
		require.NoError(t, err)
		t.Cleanup(func() {
			if err := service.Stop(); err != nil {
				t.Error(err)
			}
		})

		// publish block with txs
		err = eventBus.PublishEventNewBlockEvents(types.EventDataNewBlockEvents{
			Height: 1,
			NumTxs: 2,
		})
		require.NoError(t, err)
		txResult1 := &abci.TxResult{
			Height: 1,
			Index:  uint32(0),
			Tx:     types.Tx("foo"),
			Result: abci.ExecTxResult{Code: 0},
		}
		err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1})
		require.NoError(t, err)
		txResult2 := &abci.TxResult{
			Height: 1,
			Index:  uint32(1),
			Tx:     types.Tx("bar"),
			Result: abci.ExecTxResult{Code: 1},
		}
		err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2})
		require.NoError(t, err)

		// Give the service a moment to drain the event bus, then confirm
		// it is still running (i.e. indexing did not crash it).
		time.Sleep(100 * time.Millisecond)
		require.True(t, service.IsRunning())
	})
}
   252  
   253  func TestStop(t *testing.T) {
   254  	indexer := &EventSink{store: testDB()}
   255  	require.NoError(t, indexer.Stop())
   256  }
   257  
// newTestBlockEvents constructs a fresh copy of a new block event containing
// known test values to exercise the indexer. Height 1 matches the heights
// used by the indexing tests.
func newTestBlockEvents() types.EventDataNewBlockEvents {
	return types.EventDataNewBlockEvents{
		Height: 1,
		Events: []abci.Event{
			makeIndexedEvent("begin_event.proposer", "FCAA001"),
			makeIndexedEvent("thingy.whatzit", "O.O"),
			makeIndexedEvent("end_event.foo", "100"),
			makeIndexedEvent("thingy.whatzit", "-.O"),
		},
	}
}
   271  
   272  // readSchema loads the indexing database schema file
   273  func readSchema() ([]*schema.Migration, error) {
   274  	const filename = "schema.sql"
   275  	contents, err := os.ReadFile(filename)
   276  	if err != nil {
   277  		return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err)
   278  	}
   279  
   280  	return []*schema.Migration{{
   281  		ID:     time.Now().Local().String() + " db schema",
   282  		Script: string(contents),
   283  	}}, nil
   284  }
   285  
   286  // resetDB drops all the data from the test database.
   287  func resetDatabase(db *sql.DB) error {
   288  	_, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`)
   289  	if err != nil {
   290  		return fmt.Errorf("dropping tables: %v", err)
   291  	}
   292  	_, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`)
   293  	if err != nil {
   294  		return fmt.Errorf("dropping views: %v", err)
   295  	}
   296  	return nil
   297  }
   298  
   299  // txResultWithEvents constructs a fresh transaction result with fixed values
   300  // for testing, that includes the specified events.
   301  func txResultWithEvents(events []abci.Event) *abci.TxResult {
   302  	return &abci.TxResult{
   303  		Height: 1,
   304  		Index:  0,
   305  		Tx:     types.Tx("HELLO WORLD"),
   306  		Result: abci.ExecTxResult{
   307  			Data:   []byte{0},
   308  			Code:   abci.CodeTypeOK,
   309  			Log:    "",
   310  			Events: events,
   311  		},
   312  	}
   313  }
   314  
   315  func loadTxResult(hash []byte) (*abci.TxResult, error) {
   316  	hashString := fmt.Sprintf("%X", hash)
   317  	var resultData []byte
   318  	if err := testDB().QueryRow(`
   319  SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
   320  `, hashString).Scan(&resultData); err != nil {
   321  		return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err)
   322  	}
   323  
   324  	txr := new(abci.TxResult)
   325  	if err := proto.Unmarshal(resultData, txr); err != nil {
   326  		return nil, fmt.Errorf("unmarshaling txr: %v", err)
   327  	}
   328  
   329  	return txr, nil
   330  }
   331  
   332  func verifyTimeStamp(tableName string) error {
   333  	return testDB().QueryRow(fmt.Sprintf(`
   334  SELECT DISTINCT %[1]s.created_at
   335    FROM %[1]s
   336    WHERE %[1]s.created_at >= $1;
   337  `, tableName), time.Now().Add(-2*time.Second)).Err()
   338  }
   339  
   340  func verifyBlock(t *testing.T, height int64) {
   341  	// Check that the blocks table contains an entry for this height.
   342  	if err := testDB().QueryRow(`
   343  SELECT height FROM `+tableBlocks+` WHERE height = $1;
   344  `, height).Err(); err == sql.ErrNoRows {
   345  		t.Errorf("No block found for height=%d", height)
   346  	} else if err != nil {
   347  		t.Fatalf("Database query failed: %v", err)
   348  	}
   349  
   350  	// Verify the presence of begin_block and end_block events.
   351  	if err := testDB().QueryRow(`
   352  SELECT type, height, chain_id FROM `+viewBlockEvents+`
   353    WHERE height = $1 AND type = $2 AND chain_id = $3;
   354  `, height, eventTypeFinalizeBlock, chainID).Err(); err == sql.ErrNoRows {
   355  		t.Errorf("No %q event found for height=%d", eventTypeFinalizeBlock, height)
   356  	} else if err != nil {
   357  		t.Fatalf("Database query failed: %v", err)
   358  	}
   359  }
   360  
   361  // verifyNotImplemented calls f and verifies that it returns both a
   362  // false-valued flag and a non-nil error whose string matching the expected
   363  // "not supported" message with label prefixed.
   364  func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) {
   365  	t.Helper()
   366  	t.Logf("Verifying that %q reports it is not implemented", label)
   367  
   368  	want := label + " is not supported via the postgres event sink"
   369  	ok, err := f()
   370  	assert.False(t, ok)
   371  	require.NotNil(t, err)
   372  	assert.Equal(t, want, err.Error())
   373  }
   374  
   375  // waitForInterrupt blocks until a SIGINT is received by the process.
   376  func waitForInterrupt() {
   377  	ch := make(chan os.Signal, 1)
   378  	signal.Notify(ch, os.Interrupt)
   379  	<-ch
   380  }