code.vegaprotocol.io/vega@v0.79.0/datanode/networkhistory/snapshot/service_load_snapshot.go (about) 1 // Copyright (C) 2023 Gobalsky Labs Limited 2 // 3 // This program is free software: you can redistribute it and/or modify 4 // it under the terms of the GNU Affero General Public License as 5 // published by the Free Software Foundation, either version 3 of the 6 // License, or (at your option) any later version. 7 // 8 // This program is distributed in the hope that it will be useful, 9 // but WITHOUT ANY WARRANTY; without even the implied warranty of 10 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 // GNU Affero General Public License for more details. 12 // 13 // You should have received a copy of the GNU Affero General Public License 14 // along with this program. If not, see <http://www.gnu.org/licenses/>. 15 16 package snapshot 17 18 import ( 19 "context" 20 "fmt" 21 "io" 22 "os" 23 "path" 24 "sort" 25 "time" 26 27 "code.vegaprotocol.io/vega/datanode/networkhistory/segment" 28 "code.vegaprotocol.io/vega/datanode/sqlstore" 29 "code.vegaprotocol.io/vega/logging" 30 31 "github.com/georgysavva/scany/pgxscan" 32 "github.com/jackc/pgtype" 33 "github.com/jackc/pgx/v4" 34 "github.com/jackc/pgx/v4/pgxpool" 35 "github.com/klauspost/compress/zip" 36 "go.uber.org/zap" 37 ) 38 39 type LoadResult struct { 40 LoadedFromHeight int64 41 LoadedToHeight int64 42 RowsLoaded int64 43 } 44 45 type LoadLog interface { 46 Infof(s string, args ...interface{}) 47 Info(msg string, fields ...zap.Field) 48 Error(msg string, fields ...zap.Field) 49 } 50 51 func (b *Service) RollbackToSegment(ctx context.Context, log LoadLog, rollbackToSegment segment.Full) error { 52 dbMeta, err := NewDatabaseMetaData(ctx, b.connPool) 53 if err != nil { 54 return fmt.Errorf("failed to get database meta data: %w", err) 55 } 56 57 if rollbackToSegment.GetDatabaseVersion() < dbMeta.DatabaseVersion { 58 log.Infof("rolling back database to version %d from version %d", rollbackToSegment.GetDatabaseVersion(), dbMeta.DatabaseVersion) 59 60 err = b.migrateSchemaDownToVersion(rollbackToSegment.GetDatabaseVersion()) 61 if err != nil { 62 return fmt.Errorf("failed to migrate down database from version %d to %d: %w", 63 dbMeta.DatabaseVersion, rollbackToSegment.GetDatabaseVersion(), err) 64 } 65 66 // Update the meta data after schema migration 67 dbMeta, err = NewDatabaseMetaData(ctx, b.connPool) 68 if err != nil { 69 return fmt.Errorf("failed to get database meta data after migration: %w", err) 70 } 71 } 72 73 tx, err := b.connPool.Begin(ctx) 74 if err != nil { 75 return fmt.Errorf("failed to begin transaction: %w", err) 76 } 77 // Rollback on a committed transaction has no effect 78 defer func() { _ = tx.Rollback(ctx) }() 79 80 rollbackToBlock, err := sqlstore.GetAtHeightUsingConnection(ctx, tx, rollbackToSegment.GetToHeight()) 81 if err != nil { 82 return fmt.Errorf("failed to get block at height: %w", err) 83 } 84 85 for _, meta := range dbMeta.TableNameToMetaData { 86 if meta.Hypertable { 87 result, err := tx.Exec(ctx, fmt.Sprintf("delete from %s where vega_time > $1", meta.Name), rollbackToBlock.VegaTime) 88 if err != nil { 89 return fmt.Errorf("failed to delete rows from %s: %w", meta.Name, err) 90 } 91 log.Infof("deleted %d rows from %s", result.RowsAffected(), meta.Name) 92 } 93 } 94 95 segments := []segment.Full{rollbackToSegment} 96 rowsCopied, err := b.loadSegmentsWithTransaction(ctx, log, tx.Conn(), segments, true, true, dbMeta, map[string]time.Time{}) 97 if err != nil { 98 return fmt.Errorf("failed to load current state: %w", err) 99 } 100 101 log.Infof("Restored current state from snapshot, %d rows loaded", rowsCopied) 102 103 err = tx.Commit(ctx) 104 if err != nil { 105 return fmt.Errorf("failed to commit transaction: %w", err) 106 } 107 108 log.Infof("updating continuous aggregate data") 109 err = updateContinuousAggregateDataFromHeight(ctx, b.connPool, rollbackToSegment.GetToHeight()) 110 if err != nil { 111 return fmt.Errorf("failed to update continuous aggregate data: %w", err) 112 } 113 114 return nil 115 } 116 117 func (b *Service) LoadSnapshotData( 118 ctx context.Context, 119 log LoadLog, 120 ch segment.ContiguousHistory[segment.Full], 121 connConfig sqlstore.ConnectionConfig, 122 optimiseForAppend, 123 verbose bool, 124 ) (LoadResult, error) { 125 if len(ch.Segments) == 0 { 126 return LoadResult{}, fmt.Errorf("no segments to load") 127 } 128 129 datanodeBlockSpan, err := sqlstore.GetDatanodeBlockSpan(ctx, b.connPool) 130 if err != nil { 131 return LoadResult{}, fmt.Errorf("failed to check if datanode has data: %w", err) 132 } 133 134 if err = validateSpanOfHistoryToLoad(datanodeBlockSpan, ch.HeightFrom, ch.HeightTo); err != nil { 135 return LoadResult{}, fmt.Errorf("failed to validate span of history to load: %w", err) 136 } 137 138 loadedFromHeight := int64(0) 139 if datanodeBlockSpan.HasData { 140 loadedFromHeight = datanodeBlockSpan.ToHeight + 1 141 } else { 142 err = sqlstore.RevertToSchemaVersionZero(b.log, connConfig, sqlstore.EmbedMigrations, verbose) 143 if err != nil { 144 return LoadResult{}, fmt.Errorf("failed to revert scheam to version zero: %w", err) 145 } 146 loadedFromHeight = ch.HeightFrom 147 } 148 149 _, err = b.connPool.Exec(ctx, "SET TIME ZONE 0") 150 if err != nil { 151 return LoadResult{}, fmt.Errorf("failed to set timezone to UTC: %w", err) 152 } 153 154 dbMetaData, err := NewDatabaseMetaData(ctx, b.connPool) 155 if err != nil { 156 return LoadResult{}, fmt.Errorf("failed to get database meta data: %w", err) 157 } 158 159 log.Info("copying data into database", logging.Int64("database version", dbMetaData.DatabaseVersion)) 160 161 var totalRowsCopied int64 162 163 historyTableLastTimestampMap, err := b.getLastHistoryTimestampMap(ctx, dbMetaData) 164 if err != nil { 165 return LoadResult{}, fmt.Errorf("failed to get last timestamp for history tables: %w", err) 166 } 167 168 dbVersionToSegments := map[int64][]segment.Full{} 169 170 for _, segment := range ch.Segments { 171 dbVersion := segment.GetDatabaseVersion() 172 dbVersionToSegments[dbVersion] = append(dbVersionToSegments[dbVersion], segment) 173 } 174 175 dbVersionsAsc := make([]int64, 0, len(dbVersionToSegments)) 176 for k := range dbVersionToSegments { 177 dbVersionsAsc = append(dbVersionsAsc, k) 178 } 179 sort.Slice(dbVersionsAsc, func(i, j int) bool { 180 return dbVersionsAsc[i] < dbVersionsAsc[j] 181 }) 182 183 for _, targetDatabaseVersion := range dbVersionsAsc { 184 if dbMetaData.DatabaseVersion != targetDatabaseVersion { 185 currentDatabaseVersion := dbMetaData.DatabaseVersion 186 log.Info("migrating database", logging.Int64("current database version", currentDatabaseVersion), logging.Int64("target database version", targetDatabaseVersion)) 187 188 err := b.migrateSchemaUpToVersion(targetDatabaseVersion) 189 if err != nil { 190 return LoadResult{}, fmt.Errorf("failed to migrate schema to version %d: %w", targetDatabaseVersion, err) 191 } 192 193 // After migration update the database meta-data 194 dbMetaData, err = NewDatabaseMetaData(ctx, b.connPool) 195 if err != nil { 196 return LoadResult{}, fmt.Errorf("failed to get database meta data after database migration: %w", err) 197 } 198 199 log.Infof("finished migrating database from version %d to version %d", currentDatabaseVersion, targetDatabaseVersion) 200 } 201 202 log.Infof("loading all segments with database version: %d", targetDatabaseVersion) 203 tx, err := b.connPool.Begin(ctx) 204 if err != nil { 205 return LoadResult{}, fmt.Errorf("failed to begin transaction: %w", err) 206 } 207 // Rollback on a committed transaction has no effect 208 defer func() { _ = tx.Rollback(ctx) }() 209 210 rowsCopied, err := b.loadSegmentsWithTransaction(ctx, log, tx.Conn(), dbVersionToSegments[targetDatabaseVersion], optimiseForAppend, 211 false, dbMetaData, historyTableLastTimestampMap) 212 if err != nil { 213 return LoadResult{}, fmt.Errorf("failed to load segments for database version %d: %w", targetDatabaseVersion, err) 214 } 215 err = tx.Commit(ctx) 216 if err != nil { 217 return LoadResult{}, fmt.Errorf("failed to commit transaction: %w", err) 218 } 219 220 totalRowsCopied += rowsCopied 221 } 222 223 log.Infof("recreating continuous aggregate data") 224 err = UpdateContinuousAggregateDataFromHighWaterMark(ctx, b.connPool, ch.HeightTo) 225 if err != nil { 226 return LoadResult{}, fmt.Errorf("failed to recreate continuous aggregate data: %w", err) 227 } 228 229 return LoadResult{ 230 LoadedFromHeight: loadedFromHeight, 231 LoadedToHeight: ch.HeightTo, 232 RowsLoaded: totalRowsCopied, 233 }, nil 234 } 235 236 func (b *Service) loadSegmentsWithTransaction(ctx context.Context, log LoadLog, conn *pgx.Conn, segments []segment.Full, 237 optimiseForAppend, currentStateOnly bool, dbMetaData DatabaseMetadata, 238 historyTableLastTimestampMap map[string]time.Time, 239 ) (int64, error) { 240 err := executeAllSql(ctx, conn, log, dbMetaData.AllTablesDisableAutoVacuumSql) 241 if err != nil { 242 return 0, fmt.Errorf("failed to execute disable autovacuum sql: %w", err) 243 } 244 245 err = executeAllSql(ctx, conn, log, dbMetaData.CurrentStateTablesDropConstraintsSql) 246 if err != nil { 247 return 0, fmt.Errorf("failed to execute current state table drop constraints sql: %w", err) 248 } 249 250 var droppedIndexes []IndexInfo 251 if !optimiseForAppend { 252 log.Infof("dropping history table constraints") 253 err = executeAllSql(ctx, conn, log, dbMetaData.HistoryStateTablesDropConstraintsSql) 254 if err != nil { 255 return 0, fmt.Errorf("failed to executed history table drop constraints sql: %w", err) 256 } 257 258 log.Infof("dropping history table indexes") 259 droppedIndexes, err = dropHistoryTableIndexes(ctx, conn, log, dbMetaData) 260 if err != nil { 261 return 0, fmt.Errorf("failed to drop history table indexes: %w", err) 262 } 263 } 264 265 var historyRowsCopied int64 266 if !currentStateOnly { 267 historyRowsCopied, err = b.loadHistorySegments(ctx, log, conn, segments, dbMetaData, historyTableLastTimestampMap) 268 if err != nil { 269 return 0, fmt.Errorf("failed to load history segments: %w", err) 270 } 271 } 272 273 currentStateRowsCopied, err := b.loadCurrentState(ctx, log, conn, segments[len(segments)-1], dbMetaData, 274 historyTableLastTimestampMap) 275 if err != nil { 276 return 0, fmt.Errorf("failed to load current state: %w", err) 277 } 278 279 if !optimiseForAppend { 280 log.Infof("restoring history table indexes") 281 err = createIndexes(ctx, conn, log, droppedIndexes) 282 if err != nil { 283 return 0, fmt.Errorf("failed to create indexes: %w", err) 284 } 285 286 log.Infof("restoring history table constraints") 287 err = executeAllSql(ctx, conn, log, dbMetaData.HistoryStateTablesCreateConstraintsSql) 288 if err != nil { 289 return 0, fmt.Errorf("failed to executed history table create constraints sql: %w", err) 290 } 291 } 292 293 log.Infof("restoring current state table constraints") 294 err = executeAllSql(ctx, conn, log, dbMetaData.CurrentStateTablesCreateConstraintsSql) 295 if err != nil { 296 return 0, fmt.Errorf("failed to executed current state table create constraints sql: %w", err) 297 } 298 299 err = executeAllSql(ctx, conn, log, dbMetaData.AllTablesEnableAutoVacuumSql) 300 if err != nil { 301 return 0, fmt.Errorf("failed to execute enable autovacuum sql: %w", err) 302 } 303 304 return historyRowsCopied + currentStateRowsCopied, nil 305 } 306 307 func (b *Service) loadCurrentState(ctx context.Context, log LoadLog, conn *pgx.Conn, segment segment.Full, 308 dbMetaData DatabaseMetadata, historyTableLastTimestampMap map[string]time.Time, 309 ) (int64, error) { 310 rowsCopied, err := b.loadSegment(ctx, conn, log, segment, "currentstate/", dbMetaData, historyTableLastTimestampMap) 311 if err != nil { 312 return 0, fmt.Errorf("failed to load current state snapshot %s: %w", segment, err) 313 } 314 315 return rowsCopied, nil 316 } 317 318 func (b *Service) loadHistorySegments(ctx context.Context, log LoadLog, conn *pgx.Conn, segments []segment.Full, 319 dbMetaData DatabaseMetadata, historyTableLastTimestampMap map[string]time.Time, 320 ) (int64, error) { 321 var totalRowsCopied int64 322 for _, segment := range segments { 323 rowsCopied, err := b.loadSegment(ctx, conn, log, segment, "history/", dbMetaData, historyTableLastTimestampMap) 324 if err != nil { 325 return 0, fmt.Errorf("failed to load history segment %s: %w", segment, err) 326 } 327 totalRowsCopied += rowsCopied 328 } 329 330 return totalRowsCopied, nil 331 } 332 333 func validateSpanOfHistoryToLoad(existingDatanodeSpan sqlstore.DatanodeBlockSpan, historyFromHeight int64, historyToHeight int64) error { 334 if !existingDatanodeSpan.HasData { 335 return nil 336 } 337 338 if historyFromHeight < existingDatanodeSpan.FromHeight { 339 return fmt.Errorf("loading history from height %d is not possible as it is before the datanodes oldest block height %d, to load this history first empty the datanode", 340 historyFromHeight, existingDatanodeSpan.FromHeight) 341 } 342 343 if historyFromHeight > existingDatanodeSpan.ToHeight+1 { 344 return fmt.Errorf("the from height of the history to load, %d, must fall within or be one greater than the datanodes current span of %d to %d", historyFromHeight, 345 existingDatanodeSpan.FromHeight, existingDatanodeSpan.ToHeight) 346 } 347 348 if historyFromHeight >= existingDatanodeSpan.FromHeight && historyToHeight <= existingDatanodeSpan.ToHeight { 349 return fmt.Errorf("the span of history requested to load, %d to %d, is within the datanodes current span of %d to %d", historyFromHeight, 350 historyToHeight, existingDatanodeSpan.FromHeight, existingDatanodeSpan.ToHeight) 351 } 352 353 return nil 354 } 355 356 func (b *Service) loadSegment(ctx context.Context, conn *pgx.Conn, log LoadLog, segment segment.Full, zipDir string, 357 dbMetaData DatabaseMetadata, historyTableLastTimestamps map[string]time.Time, 358 ) (int64, error) { 359 // first we fetch and save the segment 360 staged, err := b.historyStore.StagedSegment(ctx, segment) 361 if err != nil { 362 return 0, err 363 } 364 365 // then we dip our fingers into the zip-file 366 reader, err := zip.OpenReader(staged.ZipFilePath()) 367 if err != nil { 368 return 0, fmt.Errorf("failed to open zip reader: %w", err) 369 } 370 defer func() { 371 if rerr := reader.Close(); rerr != nil { 372 b.log.Error("unable to close zip reader", logging.Error(rerr)) 373 } 374 }() 375 376 // push it into the database 377 log.Info("copying into database", logging.String("segment", staged.ZipFileName())) 378 startTime := time.Now() 379 rowsCopied, err := copyDataIntoDatabase(ctx, conn, reader, zipDir, dbMetaData, historyTableLastTimestamps) 380 if err != nil { 381 return 0, fmt.Errorf("failed to copy data into the database %s : %w", staged.ZipFileName(), err) 382 } 383 log.Info("copy complete", logging.Int64("rows", rowsCopied), logging.String("segment", staged.ZipFileName()), logging.Duration("took", time.Since(startTime))) 384 385 // and finally remove the zipfile we staged earlier 386 log.Info("removing staged segment", logging.String("path", staged.ZipFilePath())) 387 if err := os.Remove(staged.ZipFilePath()); err != nil { 388 log.Error("unable to remove staged segment", logging.Error(err)) 389 } 390 391 return rowsCopied, nil 392 } 393 394 func getLastPartitionColumnEntryForHistoryTable(ctx context.Context, vegaDbConn *pgxpool.Conn, historyTableMetaData TableMetadata) (time.Time, error) { 395 timeSelect := fmt.Sprintf(`SELECT %s FROM %s order by %s desc limit 1`, historyTableMetaData.PartitionColumn, historyTableMetaData.Name, 396 historyTableMetaData.PartitionColumn) 397 398 rows, err := vegaDbConn.Query(ctx, timeSelect) 399 if err != nil { 400 return time.Time{}, fmt.Errorf("failed to query last partition column time: %w", err) 401 } 402 defer rows.Close() 403 404 if rows.Next() { 405 values, err := rows.Values() 406 if err != nil { 407 return time.Time{}, fmt.Errorf("failed to get values for row: %w", err) 408 } 409 if len(values) != 1 { 410 return time.Time{}, fmt.Errorf("expected just 1 value got %d", len(values)) 411 } 412 partitionTime, ok := values[0].(time.Time) 413 if !ok { 414 return time.Time{}, fmt.Errorf("expected value to be of type time, got %v", values[0]) 415 } 416 417 return partitionTime, nil 418 } 419 420 return time.Time{}, nil 421 } 422 423 func (b *Service) getLastHistoryTimestampMap(ctx context.Context, dbMetadata DatabaseMetadata) (map[string]time.Time, error) { 424 lastHistoryTimestampMap := map[string]time.Time{} 425 426 conn, err := b.connPool.Acquire(ctx) 427 if err != nil { 428 return nil, fmt.Errorf("failed to acquire connection: %w", err) 429 } 430 defer conn.Release() 431 432 for table, metadata := range dbMetadata.TableNameToMetaData { 433 if metadata.PartitionColumn != "" { 434 lastPartitionTime, err := getLastPartitionColumnEntryForHistoryTable(ctx, conn, metadata) 435 if err != nil { 436 return nil, fmt.Errorf("failed to get last partition column entry for history table %s: %w", table, err) 437 } 438 lastHistoryTimestampMap[table] = lastPartitionTime 439 } 440 } 441 442 return lastHistoryTimestampMap, nil 443 } 444 445 func executeAllSql(ctx context.Context, vegaDbConn sqlstore.Connection, loadLog LoadLog, allSql []string) error { 446 for _, sql := range allSql { 447 loadLog.Infof("executing sql: %s", sql) 448 _, err := vegaDbConn.Exec(ctx, sql) 449 if err != nil { 450 return fmt.Errorf("failed to execute sql %s: %w", sql, err) 451 } 452 } 453 return nil 454 } 455 456 func dropHistoryTableIndexes(ctx context.Context, vegaDbConn sqlstore.Connection, loadLog LoadLog, 457 dbMetadata DatabaseMetadata, 458 ) ([]IndexInfo, error) { 459 var indexes []IndexInfo 460 461 rows, err := vegaDbConn.Query(ctx, `select tablename, Indexname, Indexdef from pg_indexes where schemaname ='public' order by tablename`) 462 if err != nil { 463 return nil, fmt.Errorf("failed to get table indexes: %w", err) 464 } 465 466 var allIndexes []IndexInfo 467 if err = pgxscan.ScanAll(&allIndexes, rows); err != nil { 468 return nil, fmt.Errorf("scanning table indexes: %w", err) 469 } 470 471 for _, index := range allIndexes { 472 if dbMetadata.TableNameToMetaData[index.Tablename].Hypertable { 473 indexes = append(indexes, index) 474 } 475 } 476 477 loadLog.Infof("dropping history table indexes") 478 for _, index := range indexes { 479 _, err = vegaDbConn.Exec(ctx, fmt.Sprintf("DROP INDEX %s", index.Indexname)) 480 481 if err != nil { 482 return nil, fmt.Errorf("failed to drop index %s: %w", index.Indexname, err) 483 } 484 } 485 486 return indexes, nil 487 } 488 489 func copyDataIntoDatabase(ctx context.Context, conn *pgx.Conn, zipReader *zip.ReadCloser, zipDir string, dbMetaData DatabaseMetadata, 490 historyTableLastTimestamps map[string]time.Time, 491 ) (rowsCopied int64, err error) { 492 // Disable all triggers 493 _, err = conn.Exec(ctx, "SET session_replication_role = replica;") 494 if err != nil { 495 return 0, fmt.Errorf("failed to disable triggers, setting session replication role to replica failed: %w", err) 496 } 497 defer func() { 498 _, triggersErr := conn.Exec(ctx, "SET session_replication_role = DEFAULT;") 499 if err == nil && triggersErr != nil { 500 err = fmt.Errorf("failed to re-enable triggers, setting session replication role to default failed: %w", err) 501 rowsCopied = 0 502 } 503 }() 504 505 var totalRowsCopied int64 506 507 for _, file := range zipReader.File { 508 dir, tableName := path.Split(file.Name) 509 if dir != zipDir { 510 continue 511 } 512 513 tableReader, err := file.Open() 514 if err != nil { 515 return 0, fmt.Errorf("failed to open file inside segment archive %w", err) 516 } 517 518 rowsCopied, err := copyTableDataIntoDatabase(ctx, dbMetaData.TableNameToMetaData[tableName], tableReader, conn, historyTableLastTimestamps) 519 520 // close the file 521 _ = tableReader.Close() 522 523 if err != nil { 524 return 0, fmt.Errorf("failed to copy data into table %s: %w", tableName, err) 525 } 526 527 totalRowsCopied += rowsCopied 528 } 529 530 return totalRowsCopied, nil 531 } 532 533 func copyTableDataIntoDatabase(ctx context.Context, tableMetaData TableMetadata, reader io.ReadCloser, 534 conn *pgx.Conn, historyTableLastTimestamps map[string]time.Time, 535 ) (int64, error) { 536 var err error 537 var rowsCopied int64 538 539 if tableMetaData.Hypertable { 540 rowsCopied, err = copyHistoryTableDataIntoDatabase(ctx, tableMetaData, reader, conn, historyTableLastTimestamps) 541 if err != nil { 542 return 0, fmt.Errorf("failed to copy history table data into database: %w", err) 543 } 544 } else { 545 tableTruncateSQL := fmt.Sprintf("truncate table %s", tableMetaData.Name) 546 _, err := conn.Exec(ctx, tableTruncateSQL) 547 if err != nil { 548 return 0, fmt.Errorf("failed to truncate table %s: %w", tableMetaData.Name, err) 549 } 550 551 rowsCopied, err = copyCurrentStateTableDataIntoDatabase(ctx, tableMetaData, conn, reader) 552 if err != nil { 553 return 0, fmt.Errorf("failed to copy current state table data into database: %w", err) 554 } 555 } 556 return rowsCopied, nil 557 } 558 559 func copyCurrentStateTableDataIntoDatabase(ctx context.Context, tableMetaData TableMetadata, conn *pgx.Conn, reader io.Reader) (int64, error) { 560 query := fmt.Sprintf(`copy %s from STDIN (FORMAT csv, HEADER)`, tableMetaData.Name) 561 tag, err := conn.PgConn().CopyFrom(ctx, reader, query) 562 if err != nil { 563 return 0, fmt.Errorf("failed to copy data into current state table: %w", err) 564 } 565 566 return tag.RowsAffected(), nil 567 } 568 569 func copyHistoryTableDataIntoDatabase(ctx context.Context, tableMetaData TableMetadata, reader io.Reader, conn *pgx.Conn, 570 historyTableLastTimestamps map[string]time.Time, 571 ) (int64, error) { 572 partitionColumn := tableMetaData.PartitionColumn 573 timestampString, err := encodeTimestampToString(historyTableLastTimestamps[tableMetaData.Name]) 574 if err != nil { 575 return 0, fmt.Errorf("failed to encode timestamp into string: %w", err) 576 } 577 578 copyQuery := fmt.Sprintf(`copy %s from STDIN (FORMAT csv, HEADER) where %s > timestamp '%s'`, tableMetaData.Name, 579 partitionColumn, timestampString) 580 581 tag, err := conn.PgConn().CopyFrom(ctx, reader, copyQuery) 582 if err != nil { 583 return 0, fmt.Errorf("failed to copy data into hyper-table %s: %w", tableMetaData.Name, err) 584 } 585 586 return tag.RowsAffected(), nil 587 } 588 589 // encodeTimestampToString is required as pgx does not support parameter interpolation on copy statements. 590 func encodeTimestampToString(lastPartitionColumnEntry time.Time) ([]byte, error) { 591 lastPartitionColumnEntry = lastPartitionColumnEntry.UTC() 592 593 ts := pgtype.Timestamp{ 594 Time: lastPartitionColumnEntry, 595 Status: pgtype.Present, 596 } 597 598 var err error 599 var timeText []byte 600 timeText, err = ts.EncodeText(nil, timeText) 601 if err != nil { 602 return nil, fmt.Errorf("failed to encode timestamp: %w", err) 603 } 604 return timeText, nil 605 } 606 607 func createIndexes(ctx context.Context, vegaDbConn sqlstore.Connection, 608 loadLog LoadLog, indexes []IndexInfo, 609 ) error { 610 for _, index := range indexes { 611 loadLog.Infof("creating index %s", index.Indexname) 612 _, err := vegaDbConn.Exec(ctx, index.Indexdef) 613 if err != nil { 614 return fmt.Errorf("failed to create index %s: %w", index.Indexname, err) 615 } 616 } 617 return nil 618 } 619 620 func updateContinuousAggregateDataFromHeight(ctx context.Context, conn *pgxpool.Pool, height int64) error { 621 fromBlock, err := sqlstore.GetAtHeightUsingConnection(ctx, conn, height) 622 if err != nil { 623 return fmt.Errorf("failed to get to block: %w", err) 624 } 625 626 dbMetaData, err := NewDatabaseMetaData(ctx, conn) 627 if err != nil { 628 return fmt.Errorf("failed to get database meta data: %w", err) 629 } 630 631 for _, cagg := range dbMetaData.ContinuousAggregatesMetaData { 632 var err error 633 highWatermark, err := getHighwaterMarkForCagg(ctx, conn, cagg) 634 if err != nil { 635 return fmt.Errorf("failed to get high watermark for cagg %s: %w", cagg.Name, err) 636 } 637 638 if highWatermark.Before(time.UnixMilli(0)) { 639 // No cagg has been calculated yet, skip 640 continue 641 } 642 643 toString, err := toStoredProcTimestampArg(highWatermark) 644 if err != nil { 645 return fmt.Errorf("failed to convert from timestamp %s to postgres string: %w", highWatermark, err) 646 } 647 648 // Truncate the from Time down to nearest complete cagg boundary 649 fromTime := fromBlock.VegaTime.Truncate(cagg.BucketInterval) 650 651 // When calling `refresh_continuous_aggregate` the refresh interval needs to be at least 2 times the bucket interval 652 if fromTime.Sub(highWatermark) < 2*cagg.BucketInterval { 653 fromTime = highWatermark.Add(-2 * cagg.BucketInterval) 654 } 655 fromString, err := toStoredProcTimestampArg(fromTime) 656 if err != nil { 657 return fmt.Errorf("failed to convert from timestamp %s to postgres string: %w", fromTime, err) 658 } 659 660 _, err = conn.Exec(ctx, fmt.Sprintf("CALL refresh_continuous_aggregate('%s', %s, %s);;", cagg.Name, fromString, toString)) 661 if err != nil { 662 return fmt.Errorf("failed to refresh continuous aggregate %s: %w", cagg.Name, err) 663 } 664 } 665 666 return nil 667 } 668 669 func getHighwaterMarkForCagg(ctx context.Context, conn *pgxpool.Pool, cagg ContinuousAggregateMetaData) (time.Time, error) { 670 query := fmt.Sprintf(`SELECT COALESCE( 671 _timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(%d)), 672 '-infinity'::timestamp with time zone);`, cagg.ID) 673 row := conn.QueryRow(ctx, query) 674 675 var highWatermark time.Time 676 err := row.Scan(&highWatermark) 677 if err != nil { 678 return time.Time{}, fmt.Errorf("failed to get high water mark: %w", err) 679 } 680 return highWatermark, nil 681 } 682 683 func UpdateContinuousAggregateDataFromHighWaterMark(ctx context.Context, conn *pgxpool.Pool, toHeight int64) error { 684 toBlock, err := sqlstore.GetAtHeightUsingConnection(ctx, conn, toHeight) 685 if err != nil { 686 return fmt.Errorf("failed to get to block: %w", err) 687 } 688 689 dbMetaData, err := NewDatabaseMetaData(ctx, conn) 690 if err != nil { 691 return fmt.Errorf("failed to get database meta data: %w", err) 692 } 693 694 for _, cagg := range dbMetaData.ContinuousAggregatesMetaData { 695 var err error 696 highWatermark, err := getHighwaterMarkForCagg(ctx, conn, cagg) 697 if err != nil { 698 return fmt.Errorf("failed to get high watermark for cagg %s: %w", cagg.Name, err) 699 } 700 701 // Truncate the toTime down to latest complete cagg boundary 702 toTime := toBlock.VegaTime.Truncate(cagg.BucketInterval) 703 704 // When calling `refresh_continuous_aggregate` the refresh interval needs to be at least 2 times the bucket interval 705 if toTime.Sub(highWatermark) < 2*cagg.BucketInterval { 706 continue 707 } 708 709 fromString := "NULL" 710 if highWatermark.After(time.UnixMilli(0)) { 711 fromString, err = toStoredProcTimestampArg(highWatermark) 712 if err != nil { 713 return fmt.Errorf("failed to convert from timestamp %s to postgres string: %w", highWatermark, err) 714 } 715 } 716 717 toString, err := toStoredProcTimestampArg(toTime) 718 if err != nil { 719 return fmt.Errorf("failed to convert to timestamp %s to postgres string: %w", toTime, err) 720 } 721 722 _, err = conn.Exec(ctx, fmt.Sprintf("CALL refresh_continuous_aggregate('%s', %s, %s);;", cagg.Name, fromString, toString)) 723 if err != nil { 724 return fmt.Errorf("failed to refresh continuous aggregate %s: %w", cagg.Name, err) 725 } 726 } 727 728 return nil 729 } 730 731 func toStoredProcTimestampArg(from time.Time) (string, error) { 732 t := pgtype.Timestamp{} 733 t.Set(from) 734 fromBytes, err := t.EncodeText(nil, []byte{}) 735 if err != nil { 736 if err != nil { 737 return "", fmt.Errorf("failed to encode time: %w", err) 738 } 739 } 740 741 fromString := string(fromBytes) 742 return "'" + fromString + "'", nil 743 }