github.com/stellar/stellar-etl@v1.0.1-0.20240312145900-4874b6bf2b89/cmd/export_diagnostic_events.go (about)

package cmd

import (
	"fmt"

	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/stellar/stellar-etl/internal/input"
	"github.com/stellar/stellar-etl/internal/transform"
	"github.com/stellar/stellar-etl/internal/utils"
)

// diagnosticEventsCmd exports the diagnostic events produced by transactions in a
// ledger range to an output file, optionally uploading the result to cloud storage.
var diagnosticEventsCmd = &cobra.Command{
	Use:   "export_diagnostic_events",
	Short: "Exports the diagnostic events over a specified range.",
	Long:  `Exports the diagnostic events over a specified range to an output file.`,
	Run: func(cmd *cobra.Command, args []string) {
		cmdLogger.SetLevel(logrus.InfoLevel)
		endNum, strictExport, isTest, isFuture, extra := utils.MustCommonFlags(cmd.Flags(), cmdLogger)
		cmdLogger.StrictExport = strictExport
		startNum, path, limit := utils.MustArchiveFlags(cmd.Flags(), cmdLogger)
		cloudStorageBucket, cloudCredentials, cloudProvider := utils.MustCloudStorageFlags(cmd.Flags(), cmdLogger)
		env := utils.GetEnvironmentDetails(isTest, isFuture)

		// Read the transactions for the requested ledger range.
		transactions, err := input.GetTransactions(startNum, endNum, limit, env)
		if err != nil {
			cmdLogger.Fatal("could not read transactions: ", err)
		}

		outFile := mustOutFile(path)
		numFailures := 0
		for _, transformInput := range transactions {
			transformed, err, ok := transform.TransformDiagnosticEvent(transformInput.Transaction, transformInput.LedgerHistory)
			if err != nil {
				ledgerSeq := transformInput.LedgerHistory.Header.LedgerSeq
				cmdLogger.LogError(fmt.Errorf("could not transform diagnostic events in transaction %d in ledger %d: %v", transformInput.Transaction.Index, ledgerSeq, err))
				numFailures += 1
				continue
			}

			// A false ok means the transaction has nothing to export; skip it
			// without counting a failure.
			if !ok {
				continue
			}
			for _, diagnosticEvent := range transformed {
				_, err := exportEntry(diagnosticEvent, outFile, extra)
				if err != nil {
					cmdLogger.LogError(fmt.Errorf("could not export diagnostic event: %v", err))
					numFailures += 1
					continue
				}
			}
		}

		outFile.Close()

		printTransformStats(len(transactions), numFailures)

		maybeUpload(cloudCredentials, cloudStorageBucket, cloudProvider, path)
	},
}
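
// A rough sketch of driving this command programmatically through cobra, for
// example from a test. It assumes the standard rootCmd wiring in this package;
// the argument values are hypothetical.
//
//	rootCmd.SetArgs([]string{
//		"export_diagnostic_events",
//		"--start-ledger", "50000000",
//		"--end-ledger", "50000060",
//		"--output-file", "diagnostic_events.txt",
//	})
//	if err := rootCmd.Execute(); err != nil {
//		cmdLogger.Fatal("export failed: ", err)
//	}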

func init() {
	rootCmd.AddCommand(diagnosticEventsCmd)
	utils.AddCommonFlags(diagnosticEventsCmd.Flags())
	utils.AddArchiveFlags("diagnostic_events", diagnosticEventsCmd.Flags())
	utils.AddCloudStorageFlags(diagnosticEventsCmd.Flags())
	diagnosticEventsCmd.MarkFlagRequired("end-ledger")

	/*
		Current flags:
			start-ledger: the ledger sequence number for the beginning of the export period
			end-ledger: the ledger sequence number for the end of the export range (*required)

			limit: maximum number of diagnostic events to export
				TODO: measure a good default value that ensures all diagnostic events within a 5 minute period will be exported with a single call
				The current max_tx_set_size is 1000 and there are 60 new ledgers in a 5 minute period:
					1000*60 = 60000

			output-file: filename of the output file

		TODO: implement extra flags if possible
			serialize-method: the method for serialization of the output data (JSON, XDR, etc)
			start and end time as a replacement for start and end sequence numbers
	*/
}
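
// A minimal example invocation from the shell (a sketch: the flag names follow
// the registrations and comments in init above, and the ledger range, file
// name, and binary name are hypothetical):
//
//	stellar-etl export_diagnostic_events --start-ledger 50000000 \
//		--end-ledger 50000060 --output-file diagnostic_events.txt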