github.com/badrootd/celestia-core@v0.0.0-20240305091328-aa4207a4b25d/state/indexer/sink/psql/psql_test.go

package psql

import (
	"context"
	"database/sql"
	"flag"
	"fmt"
	"log"
	"os"
	"os/signal"
	"testing"
	"time"

	"github.com/adlio/schema"
	"github.com/gogo/protobuf/proto"
	"github.com/ory/dockertest"
	"github.com/ory/dockertest/docker"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	abci "github.com/badrootd/celestia-core/abci/types"
	"github.com/badrootd/celestia-core/state/txindex"
	"github.com/badrootd/celestia-core/types"

	// Register the Postgres database driver.
	_ "github.com/lib/pq"
)

var (
	doPauseAtExit = flag.Bool("pause-at-exit", false,
		"If true, pause the test until interrupted at shutdown, to allow debugging")

	// A hook that test cases can call to obtain the shared database instance
	// used for testing the sink. This is initialized in TestMain (see below).
	testDB func() *sql.DB
)

const (
	user     = "postgres"
	password = "secret"
	port     = "5432"
	dsn      = "postgres://%s:%s@localhost:%s/%s?sslmode=disable"
	dbName   = "postgres"
	chainID  = "test-chainID"

	viewBlockEvents = "block_events"
	viewTxEvents    = "tx_events"
)

func TestMain(m *testing.M) {
	flag.Parse()

	// Set up docker and start a container running PostgreSQL.
	pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL"))
	if err != nil {
		log.Fatalf("Creating docker pool: %v", err)
	}

	resource, err := pool.RunWithOptions(&dockertest.RunOptions{
		Repository: "postgres",
		Tag:        "13",
		Env: []string{
			"POSTGRES_USER=" + user,
			"POSTGRES_PASSWORD=" + password,
			"POSTGRES_DB=" + dbName,
			"listen_addresses = '*'",
		},
		ExposedPorts: []string{port},
	}, func(config *docker.HostConfig) {
		// Set AutoRemove to true so that the stopped container goes away by itself.
		config.AutoRemove = true
		config.RestartPolicy = docker.RestartPolicy{
			Name: "no",
		}
	})
	if err != nil {
		log.Fatalf("Starting docker pool: %v", err)
	}

	if *doPauseAtExit {
		log.Print("Pause at exit is enabled, containers will not expire")
	} else {
		const expireSeconds = 60
		_ = resource.Expire(expireSeconds)
		log.Printf("Container expiration set to %d seconds", expireSeconds)
	}

	// Connect to the database, clear any leftover data, and install the
	// indexing schema.
	conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName)
	var db *sql.DB

	if err := pool.Retry(func() error {
		sink, err := NewEventSink(conn, chainID)
		if err != nil {
			return err
		}
		db = sink.DB() // set global for test use
		return db.Ping()
	}); err != nil {
		log.Fatalf("Connecting to database: %v", err)
	}

	if err := resetDatabase(db); err != nil {
		log.Fatalf("Flushing database: %v", err)
	}

	sm, err := readSchema()
	if err != nil {
		log.Fatalf("Reading schema: %v", err)
	}
	migrator := schema.NewMigrator()
	if err := migrator.Apply(db, sm); err != nil {
		log.Fatalf("Applying schema: %v", err)
	}

	// Set up the hook for tests to get the shared database handle.
	testDB = func() *sql.DB { return db }

	// Run the selected test cases.
	code := m.Run()

	// Clean up and shut down the database container.
	if *doPauseAtExit {
		log.Print("Testing complete, pausing for inspection. Send SIGINT to resume teardown")
		waitForInterrupt()
		log.Print("(resuming)")
	}
	log.Print("Shutting down database")
	if err := pool.Purge(resource); err != nil {
		log.Printf("WARNING: Purging pool failed: %v", err)
	}
	if err := db.Close(); err != nil {
		log.Printf("WARNING: Closing database failed: %v", err)
	}

	os.Exit(code)
}

func TestIndexing(t *testing.T) {
	t.Run("IndexBlockEvents", func(t *testing.T) {
		indexer := &EventSink{store: testDB(), chainID: chainID}
		require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader()))

		verifyBlock(t, 1)
		verifyBlock(t, 2)

		verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(1) })
		verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) })

		verifyNotImplemented(t, "block search", func() (bool, error) {
			v, err := indexer.SearchBlockEvents(context.Background(), nil)
			return v != nil, err
		})

		require.NoError(t, verifyTimeStamp(tableBlocks))

		// Attempting to reindex the same events should gracefully succeed.
		require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader()))
	})

	t.Run("IndexTxEvents", func(t *testing.T) {
		indexer := &EventSink{store: testDB(), chainID: chainID}

		txResult := txResultWithEvents([]abci.Event{
			makeIndexedEvent("account.number", "1"),
			makeIndexedEvent("account.owner", "Ivan"),
			makeIndexedEvent("account.owner", "Yulieta"),

			{Type: "", Attributes: []abci.EventAttribute{
				{
					Key:   "not_allowed",
					Value: "Vlad",
					Index: true,
				},
			}},
		})
		require.NoError(t, indexer.IndexTxEvents([]*abci.TxResult{txResult}))

		txr, err := loadTxResult(types.Tx(txResult.Tx).Hash())
		require.NoError(t, err)
		assert.Equal(t, txResult, txr)

		require.NoError(t, verifyTimeStamp(tableTxResults))
		require.NoError(t, verifyTimeStamp(viewTxEvents))

		verifyNotImplemented(t, "getTxByHash", func() (bool, error) {
			txr, err := indexer.GetTxByHash(types.Tx(txResult.Tx).Hash())
			return txr != nil, err
		})
		verifyNotImplemented(t, "tx search", func() (bool, error) {
			txr, err := indexer.SearchTxEvents(context.Background(), nil)
			return txr != nil, err
		})

		// Inserting the same tx events again should gracefully succeed.
		err = indexer.IndexTxEvents([]*abci.TxResult{txResult})
		require.NoError(t, err)
	})

	t.Run("IndexerService", func(t *testing.T) {
		indexer := &EventSink{store: testDB(), chainID: chainID}

		// event bus
		eventBus := types.NewEventBus()
		err := eventBus.Start()
		require.NoError(t, err)
		t.Cleanup(func() {
			if err := eventBus.Stop(); err != nil {
				t.Error(err)
			}
		})

		service := txindex.NewIndexerService(indexer.TxIndexer(), indexer.BlockIndexer(), eventBus, true)
		err = service.Start()
		require.NoError(t, err)
		t.Cleanup(func() {
			if err := service.Stop(); err != nil {
				t.Error(err)
			}
		})

		// publish block with txs
		err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
			Header: types.Header{Height: 1},
			NumTxs: int64(2),
		})
		require.NoError(t, err)
		txResult1 := &abci.TxResult{
			Height: 1,
			Index:  uint32(0),
			Tx:     types.Tx("foo"),
			Result: abci.ResponseDeliverTx{Code: 0},
		}
		err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1})
		require.NoError(t, err)
		txResult2 := &abci.TxResult{
			Height: 1,
			Index:  uint32(1),
			Tx:     types.Tx("bar"),
			Result: abci.ResponseDeliverTx{Code: 1},
		}
		err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2})
		require.NoError(t, err)

		time.Sleep(100 * time.Millisecond)
		require.True(t, service.IsRunning())
	})
}

func TestStop(t *testing.T) {
	indexer := &EventSink{store: testDB()}
	require.NoError(t, indexer.Stop())
}

// newTestBlockHeader constructs a fresh copy of a block header containing
// known test values to exercise the indexer.
func newTestBlockHeader() types.EventDataNewBlockHeader {
	return types.EventDataNewBlockHeader{
		Header: types.Header{Height: 1},
		ResultBeginBlock: abci.ResponseBeginBlock{
			Events: []abci.Event{
				makeIndexedEvent("begin_event.proposer", "FCAA001"),
				makeIndexedEvent("thingy.whatzit", "O.O"),
			},
		},
		ResultEndBlock: abci.ResponseEndBlock{
			Events: []abci.Event{
				makeIndexedEvent("end_event.foo", "100"),
				makeIndexedEvent("thingy.whatzit", "-.O"),
			},
		},
	}
}

// readSchema loads the indexing database schema file.
func readSchema() ([]*schema.Migration, error) {
	const filename = "schema.sql"
	contents, err := os.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err)
	}

	return []*schema.Migration{{
		ID:     time.Now().Local().String() + " db schema",
		Script: string(contents),
	}}, nil
}

// resetDatabase drops all the data from the test database.
func resetDatabase(db *sql.DB) error {
	_, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`)
	if err != nil {
		return fmt.Errorf("dropping tables: %v", err)
	}
	_, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`)
	if err != nil {
		return fmt.Errorf("dropping views: %v", err)
	}
	return nil
}

// txResultWithEvents constructs a fresh transaction result with fixed values
// for testing, that includes the specified events.
func txResultWithEvents(events []abci.Event) *abci.TxResult {
	return &abci.TxResult{
		Height: 1,
		Index:  0,
		Tx:     types.Tx("HELLO WORLD"),
		Result: abci.ResponseDeliverTx{
			Data:   []byte{0},
			Code:   abci.CodeTypeOK,
			Log:    "",
			Events: events,
		},
	}
}

// loadTxResult reads the indexed transaction result for the given hash
// directly from the database and decodes it.
func loadTxResult(hash []byte) (*abci.TxResult, error) {
	hashString := fmt.Sprintf("%X", hash)
	var resultData []byte
	if err := testDB().QueryRow(`
SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
`, hashString).Scan(&resultData); err != nil {
		return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err)
	}

	txr := new(abci.TxResult)
	if err := proto.Unmarshal(resultData, txr); err != nil {
		return nil, fmt.Errorf("unmarshaling txr: %v", err)
	}

	return txr, nil
}

// verifyTimeStamp queries the named table or view for rows whose created_at
// timestamp is recent.
func verifyTimeStamp(tableName string) error {
	return testDB().QueryRow(fmt.Sprintf(`
SELECT DISTINCT %[1]s.created_at
  FROM %[1]s
  WHERE %[1]s.created_at >= $1;
`, tableName), time.Now().Add(-2*time.Second)).Err()
}

// verifyBlock checks that the blocks table and the block_events view contain
// the expected rows for the given height.
func verifyBlock(t *testing.T, height int64) {
	// Check that the blocks table contains an entry for this height.
	if err := testDB().QueryRow(`
SELECT height FROM `+tableBlocks+` WHERE height = $1;
`, height).Err(); err == sql.ErrNoRows {
		t.Errorf("No block found for height=%d", height)
	} else if err != nil {
		t.Fatalf("Database query failed: %v", err)
	}

	// Verify the presence of begin_block and end_block events.
	if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
  WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, eventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows {
		t.Errorf("No %q event found for height=%d", eventTypeBeginBlock, height)
	} else if err != nil {
		t.Fatalf("Database query failed: %v", err)
	}

	if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
  WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, eventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows {
		t.Errorf("No %q event found for height=%d", eventTypeEndBlock, height)
	} else if err != nil {
		t.Fatalf("Database query failed: %v", err)
	}
}

// verifyNotImplemented calls f and verifies that it returns both a
// false-valued flag and a non-nil error whose string matches the expected
// "not supported" message with the label prefixed.
func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) {
	t.Helper()
	t.Logf("Verifying that %q reports it is not implemented", label)

	want := label + " is not supported via the postgres event sink"
	ok, err := f()
	assert.False(t, ok)
	require.NotNil(t, err)
	assert.Equal(t, want, err.Error())
}

// waitForInterrupt blocks until a SIGINT is received by the process.
func waitForInterrupt() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	<-ch
}