github.com/TrueBlocks/trueblocks-core/src/apps/chifra@v0.0.0-20241022031540-b362680128f7/internal/scrape/handle_show.go

package scrapePkg

// Copyright 2021 The TrueBlocks Authors. All rights reserved.
// Use of this source code is governed by a license that can
// be found in the LICENSE file.

import (
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/base"
	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/colors"
	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/config"
	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/file"
	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/logger"
	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/output"
	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/sigintTrap"
	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/tslib"
	"github.com/TrueBlocks/trueblocks-core/src/apps/chifra/pkg/types"
)

// criticalError means that we have to stop the scraper entirely
// rather than report the error and retry next round.
type criticalError struct {
	err error
}

// Error implements the error interface.
func (c *criticalError) Error() string {
	return c.err.Error()
}

// NewCriticalError wraps err as a criticalError.
func NewCriticalError(err error) *criticalError {
	return &criticalError{
		err,
	}
}
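
// Callers that can tolerate a failed round test for criticalError with a
// plain type assertion, as the consolidation step in HandleScrape does:
//
//	if _, critical := err.(*criticalError); critical {
//		break // a critical error stops the scraper
//	}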

// HandleScrape enters a forever loop and continually scrapes --block_cnt blocks
// (or fewer if we're close to the head). Each round, the loop pauses for
// --sleep seconds (or, if we're not close to the head, for .25 seconds).
func (opts *ScrapeOptions) HandleScrape(rCtx *output.RenderCtx) error {
	chain := opts.Globals.Chain
	testMode := opts.Globals.TestMode
	defer func() {
		pidPath := opts.getPidFilePath()
		_ = os.Remove(pidPath)
	}()

	path := config.PathToIndex(chain)
	provider := config.GetChain(chain).RpcProvider
	if testMode {
		// Use stable placeholders so test output is deterministic.
		path = "--unchained-path--"
		provider = "--rpc-provider--"
	}

	// When TB_NODE_HEADLESS is set, suppress routine logging.
	isHeadless := os.Getenv("TB_NODE_HEADLESS") == "true"

	msg1 := fmt.Sprintf("Scraping %s", chain)
	msg2 := fmt.Sprintf("  Rpc %s", provider)
	msg3 := fmt.Sprintf("  Path %s", path)
	msg4 := fmt.Sprintf("  Settings %v", config.GetScrape(chain))

	if opts.DryRun {
		opts.Globals.NoHeader = true
		fetchData := func(modelChan chan types.Modeler, errorChan chan error) {
			nl := "\n"
			modelChan <- &types.Message{Msg: msg1 + nl + msg2 + nl + msg3 + nl + msg4}
		}
		return output.StreamMany(rCtx, fetchData, opts.Globals.OutputOpts())
	} else if !isHeadless {
		logger.Info(msg1)
		logger.Info(msg2)
		logger.Info(msg3)
		logger.Info(msg4)
	}

	// Handle Ctrl-C, docker stop, and docker compose down (provided they
	// send SIGINT).
	sigintCtx, cancel := context.WithCancel(context.Background())
	cleanOnQuit := func() {
		// We only print a warning here; the scrape.pid file will be
		// removed by the deferred function above.
		logger.Warn(sigintTrap.TrapMessage)
	}
	trapChannel := sigintTrap.Enable(sigintCtx, cancel, cleanOnQuit)
	defer sigintTrap.Disable(trapChannel)
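
	// From here on, a SIGINT cancels sigintCtx. The loop below polls
	// sigintCtx.Err() at each step, so a single Ctrl-C lets the current
	// round wind down cleanly before the deferred cleanup runs.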

	// Declared outside the loop so the gotos below don't jump over a new
	// declaration (which Go disallows).
	var blocks = make([]base.Blknum, 0, opts.BlockCnt)
	var err error

	// Clean the temporary files and make sure block zero has been processed.
	if ok, err := opts.Prepare(); !ok || err != nil {
		return err
	}

	runCount := uint64(0)
	// Loop until the user hits Ctrl+C, until runCount runs out, or until
	// the server tells us to stop.
	for {
		if sigintCtx.Err() != nil {
			// The context was cancelled, i.e. we got a SIGINT.
			return nil
		}

		// Create a new manager for each pass; we populate it fully below...
		bm := BlazeManager{
			chain: chain,
		}

		// Fetch the metadata which tells us how far along the index is.
		if bm.meta, err = opts.Conn.GetMetaData(testMode); err != nil {
			var ErrFetchingMeta = fmt.Errorf("error fetching metadata: %s", err)
			logger.Error(colors.BrightRed+ErrFetchingMeta.Error(), colors.Off)
			goto PAUSE
		}

		// This only happens if the chain and the index scraper are started at the
		// same time (rarely). It protects against the case where the chain has no
		// ripe blocks yet. Report no error and sleep for a while.
		if bm.meta.ChainHeight() < base.Blknum(opts.Settings.UnripeDist) {
			goto PAUSE
		}

		// Another rare case: the user has reset their node but not removed the
		// index, so the index is ahead of the chain. Go to sleep and try again
		// later in the hopes that the chain catches up.
		if !opts.DryRun && bm.meta.NextIndexHeight() > bm.meta.ChainHeight()+1 {
			var ErrIndexAhead = fmt.Errorf(
				"index (%d) is ahead of chain (%d)",
				bm.meta.NextIndexHeight(),
				bm.meta.ChainHeight(),
			)
			logger.Error(colors.BrightRed+ErrIndexAhead.Error(), colors.Off)
			goto PAUSE
		}

		// Let's start a new round...
		bm = BlazeManager{
			chain:        chain,
			opts:         opts,
			nRipe:        0,
			nUnripe:      0,
			timestamps:   make(map[base.Blknum]tslib.TimestampRecord, opts.BlockCnt),
			processedMap: make(map[base.Blknum]bool, opts.BlockCnt),
			meta:         bm.meta,
			nChannels:    int(opts.Settings.ChannelCount),
			isHeadless:   isHeadless,
		}

		// Order-dependent, be careful!
		// The first block to scrape is one past the end of the previous round.
		bm.startBlock = bm.meta.NextIndexHeight()
		// if opts.StartBlock != 0 {
		// 	bm.startBlock = opts.StartBlock
		// }

		// User-supplied, but never so many as to pass the chain tip.
		bm.blockCount = base.Min(base.Blknum(opts.BlockCnt), bm.meta.ChainHeight()-bm.StartBlock()+1)
		// The ripe block is UnripeDist blocks behind the chain tip.
		bm.ripeBlock = bm.meta.ChainHeight() - base.Blknum(opts.Settings.UnripeDist)

		// These are the blocks we're going to process this round.
		blocks = make([]base.Blknum, 0, bm.BlockCount())
		for block := bm.StartBlock(); block < bm.EndBlock(); block++ {
			blocks = append(blocks, block)
		}
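
		// A worked example (assuming EndBlock() == StartBlock()+BlockCount()):
		// with NextIndexHeight()=100, ChainHeight()=1000, BlockCnt=2000, and
		// UnripeDist=28, we get startBlock=100, blockCount=min(2000, 901)=901,
		// ripeBlock=972, and blocks spans [100, 1000].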

		if len(blocks) == 0 {
			logger.Info("no blocks to scrape")
			goto PAUSE
		}

		if opts.Globals.Verbose {
			logger.Info("chain head:           ", bm.meta.ChainHeight())
			logger.Info("opts.BlockCnt:        ", opts.BlockCnt)
			logger.Info("ripe block:           ", bm.ripeBlock)
			logger.Info("perChunk:             ", bm.PerChunk())
			logger.Info("start block:          ", bm.StartBlock())
			logger.Info("block count:          ", bm.BlockCount())
			logger.Info("len(blocks):          ", len(blocks))
			if len(blocks) > 0 {
				logger.Info("blocks[0]:            ", blocks[0])
				logger.Info("blocks[len(blocks)-1]:", blocks[len(blocks)-1])
			}
		}

		// Scrape this round. Quit only on catastrophic errors; report and sleep otherwise.
		if err = bm.ScrapeBatch(sigintCtx, blocks); err != nil || sigintCtx.Err() != nil {
			if err != nil {
				logger.Error(colors.BrightRed+err.Error(), colors.Off)
			}
			if sigintCtx.Err() != nil {
				break
			}
			goto PAUSE
		}

		if bm.nRipe == 0 {
			if !bm.isHeadless {
				logger.Info(colors.Green+"no ripe files to consolidate on chain", chain, strings.Repeat(" ", 40), colors.Off)
			}
			goto PAUSE
		} else {
			// Consolidate a chunk (if possible). Quit only on catastrophic errors; report and sleep otherwise.
			if err = bm.Consolidate(sigintCtx, blocks); err != nil || sigintCtx.Err() != nil {
				if err != nil {
					logger.Error(colors.BrightRed+err.Error(), colors.Off)
				}
				// Only a criticalError (or a SIGINT) stops the scraper.
				_, critical := err.(*criticalError)
				if critical || sigintCtx.Err() != nil {
					break
				}
				goto PAUSE
			}
		}

	PAUSE:
		runCount++
		if opts.RunCount != 0 && runCount >= opts.RunCount {
			// No reason to clean up here. The next round will do so, and the
			// user can use these files in the meantime.
			if !isHeadless {
				logger.Info("run count reached")
			}
			break
		}

		// Sleep for a bit (there are no new blocks anyway if we're caught up).
		distanceFromHead := base.Blknum(28)
		if bm.meta != nil { // it may be nil if the node died
			distanceFromHead = bm.meta.ChainHeight() - bm.meta.StageHeight()
		}
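		// Per the doc comment on HandleScrape, pause sleeps for --sleep
		// seconds near the head and only ~.25 seconds when we're far behind
		// and should hurry to catch up.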
		opts.pause(sigintCtx, distanceFromHead)
		if sigintCtx.Err() != nil {
			return nil
		}

		// Defensive programming - just double-checking our own understanding...
		count := file.NFilesInFolder(bm.RipeFolder())
		if count != 0 {
			_ = cleanEphemeralIndexFolders(chain)
			err := fmt.Errorf("%d unexpected ripe files in %s", count, bm.RipeFolder())
			logger.Error(colors.BrightRed+err.Error(), colors.Off)
		}

		// We want to clean up the unripe files. The chain may have re-orged
		// (it frequently does), and we want to re-query these blocks next
		// round. This is why we have an unripePath.
		if err = os.RemoveAll(bm.UnripeFolder()); err != nil {
			logger.Error(colors.BrightRed, err, colors.Off)
			return err
		}
	}

	// We've left the loop and we're done.
	return nil
}

var spaces = strings.Repeat(" ", 50)

// cleanEphemeralIndexFolders removes files in ripe and unripe
func cleanEphemeralIndexFolders(chain string) error {
	return file.CleanFolder(chain, config.PathToIndex(chain), []string{"ripe", "unripe"})
}

func (opts *ScrapeOptions) HandleShow(rCtx *output.RenderCtx) error {
	// Note: this delegates to HandleScrape, which loops until interrupted
	// (or until the run count, if any, is reached).
	return opts.HandleScrape(rCtx)
}