github.com/MetalBlockchain/metalgo@v1.11.9/chains/manager.go (about)

     1  // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
     2  // See the file LICENSE for licensing terms.
     3  
     4  package chains
     5  
     6  import (
     7  	"context"
     8  	"crypto"
     9  	"errors"
    10  	"fmt"
    11  	"os"
    12  	"path/filepath"
    13  	"sync"
    14  	"time"
    15  
    16  	"go.uber.org/zap"
    17  
    18  	"github.com/MetalBlockchain/metalgo/api/health"
    19  	"github.com/MetalBlockchain/metalgo/api/keystore"
    20  	"github.com/MetalBlockchain/metalgo/api/metrics"
    21  	"github.com/MetalBlockchain/metalgo/api/server"
    22  	"github.com/MetalBlockchain/metalgo/chains/atomic"
    23  	"github.com/MetalBlockchain/metalgo/database"
    24  	"github.com/MetalBlockchain/metalgo/database/meterdb"
    25  	"github.com/MetalBlockchain/metalgo/database/prefixdb"
    26  	"github.com/MetalBlockchain/metalgo/ids"
    27  	"github.com/MetalBlockchain/metalgo/message"
    28  	"github.com/MetalBlockchain/metalgo/network"
    29  	"github.com/MetalBlockchain/metalgo/network/p2p"
    30  	"github.com/MetalBlockchain/metalgo/snow"
    31  	"github.com/MetalBlockchain/metalgo/snow/engine/avalanche/bootstrap/queue"
    32  	"github.com/MetalBlockchain/metalgo/snow/engine/avalanche/state"
    33  	"github.com/MetalBlockchain/metalgo/snow/engine/avalanche/vertex"
    34  	"github.com/MetalBlockchain/metalgo/snow/engine/common"
    35  	"github.com/MetalBlockchain/metalgo/snow/engine/common/tracker"
    36  	"github.com/MetalBlockchain/metalgo/snow/engine/snowman/block"
    37  	"github.com/MetalBlockchain/metalgo/snow/engine/snowman/syncer"
    38  	"github.com/MetalBlockchain/metalgo/snow/networking/handler"
    39  	"github.com/MetalBlockchain/metalgo/snow/networking/router"
    40  	"github.com/MetalBlockchain/metalgo/snow/networking/sender"
    41  	"github.com/MetalBlockchain/metalgo/snow/networking/timeout"
    42  	"github.com/MetalBlockchain/metalgo/snow/validators"
    43  	"github.com/MetalBlockchain/metalgo/staking"
    44  	"github.com/MetalBlockchain/metalgo/subnets"
    45  	"github.com/MetalBlockchain/metalgo/trace"
    46  	"github.com/MetalBlockchain/metalgo/utils/buffer"
    47  	"github.com/MetalBlockchain/metalgo/utils/constants"
    48  	"github.com/MetalBlockchain/metalgo/utils/crypto/bls"
    49  	"github.com/MetalBlockchain/metalgo/utils/logging"
    50  	"github.com/MetalBlockchain/metalgo/utils/metric"
    51  	"github.com/MetalBlockchain/metalgo/utils/perms"
    52  	"github.com/MetalBlockchain/metalgo/utils/set"
    53  	"github.com/MetalBlockchain/metalgo/version"
    54  	"github.com/MetalBlockchain/metalgo/vms"
    55  	"github.com/MetalBlockchain/metalgo/vms/fx"
    56  	"github.com/MetalBlockchain/metalgo/vms/metervm"
    57  	"github.com/MetalBlockchain/metalgo/vms/nftfx"
    58  	"github.com/MetalBlockchain/metalgo/vms/platformvm/warp"
    59  	"github.com/MetalBlockchain/metalgo/vms/propertyfx"
    60  	"github.com/MetalBlockchain/metalgo/vms/proposervm"
    61  	"github.com/MetalBlockchain/metalgo/vms/secp256k1fx"
    62  	"github.com/MetalBlockchain/metalgo/vms/tracedvm"
    63  
    64  	p2ppb "github.com/MetalBlockchain/metalgo/proto/pb/p2p"
    65  	smcon "github.com/MetalBlockchain/metalgo/snow/consensus/snowman"
    66  	aveng "github.com/MetalBlockchain/metalgo/snow/engine/avalanche"
    67  	avbootstrap "github.com/MetalBlockchain/metalgo/snow/engine/avalanche/bootstrap"
    68  	avagetter "github.com/MetalBlockchain/metalgo/snow/engine/avalanche/getter"
    69  	smeng "github.com/MetalBlockchain/metalgo/snow/engine/snowman"
    70  	smbootstrap "github.com/MetalBlockchain/metalgo/snow/engine/snowman/bootstrap"
    71  	snowgetter "github.com/MetalBlockchain/metalgo/snow/engine/snowman/getter"
    72  	timetracker "github.com/MetalBlockchain/metalgo/snow/networking/tracker"
    73  )
    74  
const (
	// ChainLabel is the metric label under which each chain's metrics are
	// registered on the per-subsystem label gatherers.
	ChainLabel = "chain"

	// defaultChannelSize is the buffer size of the channel a VM uses to send
	// messages to its consensus engine.
	// initialQueueSize is the initial capacity of the chain-creation deque.
	defaultChannelSize = 1
	initialQueueSize   = 3

	// Metric namespaces, one per subsystem, all rooted at the platform name.
	avalancheNamespace    = constants.PlatformName + metric.NamespaceSeparator + "avalanche"
	handlerNamespace      = constants.PlatformName + metric.NamespaceSeparator + "handler"
	meterchainvmNamespace = constants.PlatformName + metric.NamespaceSeparator + "meterchainvm"
	meterdagvmNamespace   = constants.PlatformName + metric.NamespaceSeparator + "meterdagvm"
	proposervmNamespace   = constants.PlatformName + metric.NamespaceSeparator + "proposervm"
	p2pNamespace          = constants.PlatformName + metric.NamespaceSeparator + "p2p"
	snowmanNamespace      = constants.PlatformName + metric.NamespaceSeparator + "snowman"
	stakeNamespace        = constants.PlatformName + metric.NamespaceSeparator + "stake"
)
    90  
var (
	// Commonly shared VM DB prefix
	VMDBPrefix = []byte("vm")

	// Bootstrapping prefixes for LinearizableVMs
	VertexDBPrefix              = []byte("vertex")
	VertexBootstrappingDBPrefix = []byte("vertex_bs")
	TxBootstrappingDBPrefix     = []byte("tx_bs")
	BlockBootstrappingDBPrefix  = []byte("interval_block_bs")

	// Bootstrapping prefixes for ChainVMs
	ChainBootstrappingDBPrefix = []byte("interval_bs")

	// Sentinel errors returned by the chain manager.
	errUnknownVMType           = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM")
	errCreatePlatformVM        = errors.New("attempted to create a chain running the PlatformVM")
	errNotBootstrapped         = errors.New("subnets not bootstrapped")
	errPartialSyncAsAValidator = errors.New("partial sync should not be configured for a validator")

	// fxs maps the ID of each supported feature extension to its factory.
	fxs = map[ids.ID]fx.Factory{
		secp256k1fx.ID: &secp256k1fx.Factory{},
		nftfx.ID:       &nftfx.Factory{},
		propertyfx.ID:  &propertyfx.Factory{},
	}

	// Compile-time check that *manager satisfies Manager.
	_ Manager = (*manager)(nil)
)
   117  
// Manager manages the chains running on this node.
// It can:
//   - Create a chain
//   - Add a registrant. When a chain is created, each registrant calls
//     RegisterChain with the new chain as the argument.
//   - Manage the aliases of chains
type Manager interface {
	// Aliaser resolves human-readable aliases to chain IDs; a chain's own
	// ID string is always an alias of itself.
	ids.Aliaser

	// Queues a chain to be created in the future after chain creator is unblocked.
	// This is only called from the P-chain thread to create other chains
	// Queued chains are created only after P-chain is bootstrapped.
	// This assumes only chains in tracked subnets are queued.
	QueueChainCreation(ChainParameters)

	// Add a registrant [r]. Every time a chain is
	// created, [r].RegisterChain([new chain]) is called.
	AddRegistrant(Registrant)

	// Given an alias, return the ID of the chain associated with that alias
	Lookup(string) (ids.ID, error)

	// Given an alias, return the ID of the VM associated with that alias
	LookupVM(string) (ids.ID, error)

	// Returns true iff the chain with the given ID exists and is finished bootstrapping
	IsBootstrapped(ids.ID) bool

	// Starts the chain creator with the initial platform chain parameters, must
	// be called once.
	StartChainCreator(platformChain ChainParameters) error

	// Shutdown stops the chain creator; see the implementation for the exact
	// teardown semantics.
	Shutdown()
}
   152  
// ChainParameters defines the chain being created
type ChainParameters struct {
	// The ID of the chain being created.
	ID ids.ID
	// ID of the subnet that validates this chain.
	SubnetID ids.ID
	// The genesis data of this chain's ledger. Passed verbatim to the VM's
	// Initialize.
	GenesisData []byte
	// The ID of the vm this chain is running.
	VMID ids.ID
	// The IDs of the feature extensions this chain is running. Each must be a
	// key of the package-level [fxs] map.
	FxIDs []ids.ID
	// Invariant: Only used when [ID] is the P-chain ID.
	CustomBeacons validators.Manager
}
   168  
// chain bundles everything the manager tracks for a single created chain:
// its display name, consensus context, VM instance, and the handler that
// routes consensus messages to its engine.
type chain struct {
	Name    string
	Context *snow.ConsensusContext
	VM      common.VM
	Handler handler.Handler
}
   175  
// ChainConfig is configuration settings for the current execution.
// [Config] is the user-provided config blob for the chain.
// [Upgrade] is a chain-specific blob for coordinating upgrades.
// Both blobs are handed to the VM's Initialize unmodified.
type ChainConfig struct {
	Config  []byte
	Upgrade []byte
}
   183  
// ManagerConfig is the full set of dependencies and settings the node
// supplies when constructing a chain Manager.
type ManagerConfig struct {
	SybilProtectionEnabled bool
	StakingTLSSigner       crypto.Signer
	StakingTLSCert         *staking.Certificate
	// StakingBLSKey is used to derive this node's BLS public key and to build
	// each chain's warp signer.
	StakingBLSKey *bls.SecretKey
	TracingEnabled bool
	// Must not be used unless [TracingEnabled] is true as this may be nil.
	Tracer                    trace.Tracer
	Log                       logging.Logger
	LogFactory                logging.Factory
	VMManager                 vms.Manager // Manage mappings from vm ID --> vm
	BlockAcceptorGroup        snow.AcceptorGroup
	TxAcceptorGroup           snow.AcceptorGroup
	VertexAcceptorGroup       snow.AcceptorGroup
	DB                        database.Database
	MsgCreator                message.OutboundMsgBuilder // message creator, shared with network
	Router                    router.Router              // Routes incoming messages to the appropriate chain
	Net                       network.Network            // Sends consensus messages to other validators
	Validators                validators.Manager         // Validators validating on this chain
	NodeID                    ids.NodeID                 // The ID of this node
	NetworkID                 uint32                     // ID of the network this node is connected to
	PartialSyncPrimaryNetwork bool
	Server                    server.Server // Handles HTTP API calls
	Keystore                  keystore.Keystore
	AtomicMemory              *atomic.Memory
	AVAXAssetID               ids.ID
	XChainID                  ids.ID          // ID of the X-Chain,
	CChainID                  ids.ID          // ID of the C-Chain,
	CriticalChains            set.Set[ids.ID] // Chains that can't exit gracefully
	TimeoutManager            timeout.Manager // Manages request timeouts when sending messages to other validators
	// Health is where per-chain failure and bootstrapped health checks are
	// registered.
	Health                    health.Registerer
	SubnetConfigs             map[ids.ID]subnets.Config // ID -> SubnetConfig
	ChainConfigs              map[string]ChainConfig    // alias -> ChainConfig
	// ShutdownNodeFunc allows the chain manager to issue a request to shutdown the node
	ShutdownNodeFunc func(exitCode int)
	MeterVMEnabled   bool // Should each VM be wrapped with a MeterVM

	Metrics        metrics.MultiGatherer
	MeterDBMetrics metrics.MultiGatherer

	FrontierPollFrequency   time.Duration
	ConsensusAppConcurrency int

	// Max Time to spend fetching a container and its
	// ancestors when responding to a GetAncestors
	BootstrapMaxTimeGetAncestors time.Duration
	// Max number of containers in an ancestors message sent by this node.
	BootstrapAncestorsMaxContainersSent int
	// This node will only consider the first [AncestorsMaxContainersReceived]
	// containers in an ancestors message it receives.
	BootstrapAncestorsMaxContainersReceived int

	ApricotPhase4Time            time.Time
	ApricotPhase4MinPChainHeight uint64

	// Tracks CPU/disk usage caused by each peer.
	ResourceTracker timetracker.ResourceTracker

	StateSyncBeacons []ids.NodeID

	// ChainDataDir is the root directory; each chain gets its own
	// subdirectory named after its chain ID.
	ChainDataDir string

	Subnets *Subnets
}
   248  
// manager implements Manager. Chain-creation requests are staged on
// [chainsQueue] and drained by the chain-creator goroutine; created chains
// are recorded in [chains] and announced to [registrants].
type manager struct {
	// Note: The string representation of a chain's ID is also considered to be an alias of the chain
	// That is, [chainID].String() is an alias for the chain, too
	ids.Aliaser
	ManagerConfig

	// Those notified when a chain is created
	registrants []Registrant

	// queue that holds chain create requests
	chainsQueue buffer.BlockingDeque[ChainParameters]
	// unblocks chain creator to start processing the queue
	unblockChainCreatorCh chan struct{}
	// shutdown the chain creator goroutine if the queue hasn't started to be
	// processed.
	chainCreatorShutdownCh chan struct{}
	chainCreatorExited     sync.WaitGroup

	// chainsLock guards [chains].
	chainsLock sync.Mutex
	// Key: Chain's ID
	// Value: The chain
	chains map[ids.ID]handler.Handler

	// snowman++ related interface to allow validators retrieval
	validatorState validators.State

	// Per-subsystem metric gatherers; each partitions its metrics by the
	// "chain" label. vmGatherer additionally partitions by VM ID.
	avalancheGatherer    metrics.MultiGatherer            // chainID
	handlerGatherer      metrics.MultiGatherer            // chainID
	meterChainVMGatherer metrics.MultiGatherer            // chainID
	meterDAGVMGatherer   metrics.MultiGatherer            // chainID
	proposervmGatherer   metrics.MultiGatherer            // chainID
	p2pGatherer          metrics.MultiGatherer            // chainID
	snowmanGatherer      metrics.MultiGatherer            // chainID
	stakeGatherer        metrics.MultiGatherer            // chainID
	vmGatherer           map[ids.ID]metrics.MultiGatherer // vmID -> chainID
}
   285  
   286  // New returns a new Manager
   287  func New(config *ManagerConfig) (Manager, error) {
   288  	avalancheGatherer := metrics.NewLabelGatherer(ChainLabel)
   289  	if err := config.Metrics.Register(avalancheNamespace, avalancheGatherer); err != nil {
   290  		return nil, err
   291  	}
   292  
   293  	handlerGatherer := metrics.NewLabelGatherer(ChainLabel)
   294  	if err := config.Metrics.Register(handlerNamespace, handlerGatherer); err != nil {
   295  		return nil, err
   296  	}
   297  
   298  	meterChainVMGatherer := metrics.NewLabelGatherer(ChainLabel)
   299  	if err := config.Metrics.Register(meterchainvmNamespace, meterChainVMGatherer); err != nil {
   300  		return nil, err
   301  	}
   302  
   303  	meterDAGVMGatherer := metrics.NewLabelGatherer(ChainLabel)
   304  	if err := config.Metrics.Register(meterdagvmNamespace, meterDAGVMGatherer); err != nil {
   305  		return nil, err
   306  	}
   307  
   308  	proposervmGatherer := metrics.NewLabelGatherer(ChainLabel)
   309  	if err := config.Metrics.Register(proposervmNamespace, proposervmGatherer); err != nil {
   310  		return nil, err
   311  	}
   312  
   313  	p2pGatherer := metrics.NewLabelGatherer(ChainLabel)
   314  	if err := config.Metrics.Register(p2pNamespace, p2pGatherer); err != nil {
   315  		return nil, err
   316  	}
   317  
   318  	snowmanGatherer := metrics.NewLabelGatherer(ChainLabel)
   319  	if err := config.Metrics.Register(snowmanNamespace, snowmanGatherer); err != nil {
   320  		return nil, err
   321  	}
   322  
   323  	stakeGatherer := metrics.NewLabelGatherer(ChainLabel)
   324  	if err := config.Metrics.Register(stakeNamespace, stakeGatherer); err != nil {
   325  		return nil, err
   326  	}
   327  
   328  	return &manager{
   329  		Aliaser:                ids.NewAliaser(),
   330  		ManagerConfig:          *config,
   331  		chains:                 make(map[ids.ID]handler.Handler),
   332  		chainsQueue:            buffer.NewUnboundedBlockingDeque[ChainParameters](initialQueueSize),
   333  		unblockChainCreatorCh:  make(chan struct{}),
   334  		chainCreatorShutdownCh: make(chan struct{}),
   335  
   336  		avalancheGatherer:    avalancheGatherer,
   337  		handlerGatherer:      handlerGatherer,
   338  		meterChainVMGatherer: meterChainVMGatherer,
   339  		meterDAGVMGatherer:   meterDAGVMGatherer,
   340  		proposervmGatherer:   proposervmGatherer,
   341  		p2pGatherer:          p2pGatherer,
   342  		snowmanGatherer:      snowmanGatherer,
   343  		stakeGatherer:        stakeGatherer,
   344  		vmGatherer:           make(map[ids.ID]metrics.MultiGatherer),
   345  	}, nil
   346  }
   347  
   348  // QueueChainCreation queues a chain creation request
   349  // Invariant: Tracked Subnet must be checked before calling this function
   350  func (m *manager) QueueChainCreation(chainParams ChainParameters) {
   351  	if sb, _ := m.Subnets.GetOrCreate(chainParams.SubnetID); !sb.AddChain(chainParams.ID) {
   352  		m.Log.Debug("skipping chain creation",
   353  			zap.String("reason", "chain already staged"),
   354  			zap.Stringer("subnetID", chainParams.SubnetID),
   355  			zap.Stringer("chainID", chainParams.ID),
   356  			zap.Stringer("vmID", chainParams.VMID),
   357  		)
   358  		return
   359  	}
   360  
   361  	if ok := m.chainsQueue.PushRight(chainParams); !ok {
   362  		m.Log.Warn("skipping chain creation",
   363  			zap.String("reason", "couldn't enqueue chain"),
   364  			zap.Stringer("subnetID", chainParams.SubnetID),
   365  			zap.Stringer("chainID", chainParams.ID),
   366  			zap.Stringer("vmID", chainParams.VMID),
   367  		)
   368  	}
   369  }
   370  
// createChain creates and starts the chain
//
// Note: it is expected for the subnet to already have the chain registered as
// bootstrapping before this function is called
func (m *manager) createChain(chainParams ChainParameters) {
	m.Log.Info("creating chain",
		zap.Stringer("subnetID", chainParams.SubnetID),
		zap.Stringer("chainID", chainParams.ID),
		zap.Stringer("vmID", chainParams.VMID),
	)

	sb, _ := m.Subnets.GetOrCreate(chainParams.SubnetID)

	// Note: buildChain builds all chain's relevant objects (notably engine and handler)
	// but does not start their operations. Starting of the handler (which could potentially
	// issue some internal messages), is delayed until chain dispatching is started and
	// the chain is registered in the manager. This ensures that no message generated by handler
	// upon start is dropped.
	chain, err := m.buildChain(chainParams, sb)
	if err != nil {
		if m.CriticalChains.Contains(chainParams.ID) {
			// Shut down if we fail to create a required chain (i.e. X, P or C)
			m.Log.Fatal("error creating required chain",
				zap.Stringer("subnetID", chainParams.SubnetID),
				zap.Stringer("chainID", chainParams.ID),
				zap.Stringer("vmID", chainParams.VMID),
				zap.Error(err),
			)
			// Shut down asynchronously so this goroutine can return promptly.
			go m.ShutdownNodeFunc(1)
			return
		}

		chainAlias := m.PrimaryAliasOrDefault(chainParams.ID)
		m.Log.Error("error creating chain",
			zap.Stringer("subnetID", chainParams.SubnetID),
			zap.Stringer("chainID", chainParams.ID),
			zap.String("chainAlias", chainAlias),
			zap.Stringer("vmID", chainParams.VMID),
			zap.Error(err),
		)

		// Register the health check for this chain regardless of if it was
		// created or not. This attempts to notify the node operator that their
		// node may not be properly validating the subnet they expect to be
		// validating.
		healthCheckErr := fmt.Errorf("failed to create chain on subnet %s: %w", chainParams.SubnetID, err)
		// Note: [err] is deliberately shadowed here; the chain-creation error
		// is preserved in [healthCheckErr] above.
		err := m.Health.RegisterHealthCheck(
			chainAlias,
			health.CheckerFunc(func(context.Context) (interface{}, error) {
				return nil, healthCheckErr
			}),
			chainParams.SubnetID.String(),
		)
		if err != nil {
			m.Log.Error("failed to register failing health check",
				zap.Stringer("subnetID", chainParams.SubnetID),
				zap.Stringer("chainID", chainParams.ID),
				zap.String("chainAlias", chainAlias),
				zap.Stringer("vmID", chainParams.VMID),
				zap.Error(err),
			)
		}
		return
	}

	m.chainsLock.Lock()
	m.chains[chainParams.ID] = chain.Handler
	m.chainsLock.Unlock()

	// Associate the newly created chain with its default alias
	if err := m.Alias(chainParams.ID, chainParams.ID.String()); err != nil {
		m.Log.Error("failed to alias the new chain with itself",
			zap.Stringer("subnetID", chainParams.SubnetID),
			zap.Stringer("chainID", chainParams.ID),
			zap.Stringer("vmID", chainParams.VMID),
			zap.Error(err),
		)
	}

	// Notify those that registered to be notified when a new chain is created
	m.notifyRegistrants(chain.Name, chain.Context, chain.VM)

	// Allows messages to be routed to the new chain. If the handler hasn't been
	// started and a message is forwarded, then the message will block until the
	// handler is started.
	m.ManagerConfig.Router.AddChain(context.TODO(), chain.Handler)

	// Register bootstrapped health checks after P chain has been added to
	// chains.
	//
	// Note: Registering this after the chain has been tracked prevents a race
	//       condition between the health check and adding the first chain to
	//       the manager.
	if chainParams.ID == constants.PlatformChainID {
		if err := m.registerBootstrappedHealthChecks(); err != nil {
			chain.Handler.StopWithError(context.TODO(), err)
		}
	}

	// Tell the chain to start processing messages.
	// If the X, P, or C Chain panics, do not attempt to recover
	chain.Handler.Start(context.TODO(), !m.CriticalChains.Contains(chainParams.ID))
}
   474  
   475  // Create a chain
   476  func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*chain, error) {
   477  	if chainParams.ID != constants.PlatformChainID && chainParams.VMID == constants.PlatformVMID {
   478  		return nil, errCreatePlatformVM
   479  	}
   480  	primaryAlias := m.PrimaryAliasOrDefault(chainParams.ID)
   481  
   482  	// Create this chain's data directory
   483  	chainDataDir := filepath.Join(m.ChainDataDir, chainParams.ID.String())
   484  	if err := os.MkdirAll(chainDataDir, perms.ReadWriteExecute); err != nil {
   485  		return nil, fmt.Errorf("error while creating chain data directory %w", err)
   486  	}
   487  
   488  	// Create the log and context of the chain
   489  	chainLog, err := m.LogFactory.MakeChain(primaryAlias)
   490  	if err != nil {
   491  		return nil, fmt.Errorf("error while creating chain's log %w", err)
   492  	}
   493  
   494  	snowmanMetrics, err := metrics.MakeAndRegister(
   495  		m.snowmanGatherer,
   496  		primaryAlias,
   497  	)
   498  	if err != nil {
   499  		return nil, err
   500  	}
   501  
   502  	vmMetrics, err := m.getOrMakeVMRegisterer(chainParams.VMID, primaryAlias)
   503  	if err != nil {
   504  		return nil, err
   505  	}
   506  
   507  	ctx := &snow.ConsensusContext{
   508  		Context: &snow.Context{
   509  			NetworkID: m.NetworkID,
   510  			SubnetID:  chainParams.SubnetID,
   511  			ChainID:   chainParams.ID,
   512  			NodeID:    m.NodeID,
   513  			PublicKey: bls.PublicFromSecretKey(m.StakingBLSKey),
   514  
   515  			XChainID:    m.XChainID,
   516  			CChainID:    m.CChainID,
   517  			AVAXAssetID: m.AVAXAssetID,
   518  
   519  			Log:          chainLog,
   520  			Keystore:     m.Keystore.NewBlockchainKeyStore(chainParams.ID),
   521  			SharedMemory: m.AtomicMemory.NewSharedMemory(chainParams.ID),
   522  			BCLookup:     m,
   523  			Metrics:      vmMetrics,
   524  
   525  			WarpSigner: warp.NewSigner(m.StakingBLSKey, m.NetworkID, chainParams.ID),
   526  
   527  			ValidatorState: m.validatorState,
   528  			ChainDataDir:   chainDataDir,
   529  		},
   530  		PrimaryAlias:   primaryAlias,
   531  		Registerer:     snowmanMetrics,
   532  		BlockAcceptor:  m.BlockAcceptorGroup,
   533  		TxAcceptor:     m.TxAcceptorGroup,
   534  		VertexAcceptor: m.VertexAcceptorGroup,
   535  	}
   536  
   537  	// Get a factory for the vm we want to use on our chain
   538  	vmFactory, err := m.VMManager.GetFactory(chainParams.VMID)
   539  	if err != nil {
   540  		return nil, fmt.Errorf("error while getting vmFactory: %w", err)
   541  	}
   542  
   543  	// Create the chain
   544  	vm, err := vmFactory.New(chainLog)
   545  	if err != nil {
   546  		return nil, fmt.Errorf("error while creating vm: %w", err)
   547  	}
   548  	// TODO: Shutdown VM if an error occurs
   549  
   550  	chainFxs := make([]*common.Fx, len(chainParams.FxIDs))
   551  	for i, fxID := range chainParams.FxIDs {
   552  		fxFactory, ok := fxs[fxID]
   553  		if !ok {
   554  			return nil, fmt.Errorf("fx %s not found", fxID)
   555  		}
   556  
   557  		chainFxs[i] = &common.Fx{
   558  			ID: fxID,
   559  			Fx: fxFactory.New(),
   560  		}
   561  	}
   562  
   563  	var chain *chain
   564  	switch vm := vm.(type) {
   565  	case vertex.LinearizableVMWithEngine:
   566  		chain, err = m.createAvalancheChain(
   567  			ctx,
   568  			chainParams.GenesisData,
   569  			m.Validators,
   570  			vm,
   571  			chainFxs,
   572  			sb,
   573  		)
   574  		if err != nil {
   575  			return nil, fmt.Errorf("error while creating new avalanche vm %w", err)
   576  		}
   577  	case block.ChainVM:
   578  		beacons := m.Validators
   579  		if chainParams.ID == constants.PlatformChainID {
   580  			beacons = chainParams.CustomBeacons
   581  		}
   582  
   583  		chain, err = m.createSnowmanChain(
   584  			ctx,
   585  			chainParams.GenesisData,
   586  			m.Validators,
   587  			beacons,
   588  			vm,
   589  			chainFxs,
   590  			sb,
   591  		)
   592  		if err != nil {
   593  			return nil, fmt.Errorf("error while creating new snowman vm %w", err)
   594  		}
   595  	default:
   596  		return nil, errUnknownVMType
   597  	}
   598  
   599  	// Register the chain with the timeout manager
   600  	if err := m.TimeoutManager.RegisterChain(ctx); err != nil {
   601  		return nil, err
   602  	}
   603  
   604  	return chain, nil
   605  }
   606  
   607  func (m *manager) AddRegistrant(r Registrant) {
   608  	m.registrants = append(m.registrants, r)
   609  }
   610  
   611  // Create a DAG-based blockchain that uses Avalanche
   612  func (m *manager) createAvalancheChain(
   613  	ctx *snow.ConsensusContext,
   614  	genesisData []byte,
   615  	vdrs validators.Manager,
   616  	vm vertex.LinearizableVMWithEngine,
   617  	fxs []*common.Fx,
   618  	sb subnets.Subnet,
   619  ) (*chain, error) {
   620  	ctx.Lock.Lock()
   621  	defer ctx.Lock.Unlock()
   622  
   623  	ctx.State.Set(snow.EngineState{
   624  		Type:  p2ppb.EngineType_ENGINE_TYPE_AVALANCHE,
   625  		State: snow.Initializing,
   626  	})
   627  
   628  	primaryAlias := m.PrimaryAliasOrDefault(ctx.ChainID)
   629  	meterDBReg, err := metrics.MakeAndRegister(
   630  		m.MeterDBMetrics,
   631  		primaryAlias,
   632  	)
   633  	if err != nil {
   634  		return nil, err
   635  	}
   636  
   637  	meterDB, err := meterdb.New(meterDBReg, m.DB)
   638  	if err != nil {
   639  		return nil, err
   640  	}
   641  
   642  	prefixDB := prefixdb.New(ctx.ChainID[:], meterDB)
   643  	vmDB := prefixdb.New(VMDBPrefix, prefixDB)
   644  	vertexDB := prefixdb.New(VertexDBPrefix, prefixDB)
   645  	vertexBootstrappingDB := prefixdb.New(VertexBootstrappingDBPrefix, prefixDB)
   646  	txBootstrappingDB := prefixdb.New(TxBootstrappingDBPrefix, prefixDB)
   647  	blockBootstrappingDB := prefixdb.New(BlockBootstrappingDBPrefix, prefixDB)
   648  
   649  	avalancheMetrics, err := metrics.MakeAndRegister(
   650  		m.avalancheGatherer,
   651  		primaryAlias,
   652  	)
   653  	if err != nil {
   654  		return nil, err
   655  	}
   656  
   657  	vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", avalancheMetrics)
   658  	if err != nil {
   659  		return nil, err
   660  	}
   661  	txBlocker, err := queue.New(txBootstrappingDB, "tx", avalancheMetrics)
   662  	if err != nil {
   663  		return nil, err
   664  	}
   665  
   666  	// Passes messages from the avalanche engines to the network
   667  	avalancheMessageSender, err := sender.New(
   668  		ctx,
   669  		m.MsgCreator,
   670  		m.Net,
   671  		m.ManagerConfig.Router,
   672  		m.TimeoutManager,
   673  		p2ppb.EngineType_ENGINE_TYPE_AVALANCHE,
   674  		sb,
   675  		avalancheMetrics,
   676  	)
   677  	if err != nil {
   678  		return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err)
   679  	}
   680  
   681  	if m.TracingEnabled {
   682  		avalancheMessageSender = sender.Trace(avalancheMessageSender, m.Tracer)
   683  	}
   684  
   685  	// Passes messages from the snowman engines to the network
   686  	snowmanMessageSender, err := sender.New(
   687  		ctx,
   688  		m.MsgCreator,
   689  		m.Net,
   690  		m.ManagerConfig.Router,
   691  		m.TimeoutManager,
   692  		p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
   693  		sb,
   694  		ctx.Registerer,
   695  	)
   696  	if err != nil {
   697  		return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err)
   698  	}
   699  
   700  	if m.TracingEnabled {
   701  		snowmanMessageSender = sender.Trace(snowmanMessageSender, m.Tracer)
   702  	}
   703  
   704  	chainConfig, err := m.getChainConfig(ctx.ChainID)
   705  	if err != nil {
   706  		return nil, fmt.Errorf("error while fetching chain config: %w", err)
   707  	}
   708  
   709  	dagVM := vm
   710  	if m.MeterVMEnabled {
   711  		meterdagvmReg, err := metrics.MakeAndRegister(
   712  			m.meterDAGVMGatherer,
   713  			primaryAlias,
   714  		)
   715  		if err != nil {
   716  			return nil, err
   717  		}
   718  
   719  		dagVM = metervm.NewVertexVM(dagVM, meterdagvmReg)
   720  	}
   721  	if m.TracingEnabled {
   722  		dagVM = tracedvm.NewVertexVM(dagVM, m.Tracer)
   723  	}
   724  
   725  	// Handles serialization/deserialization of vertices and also the
   726  	// persistence of vertices
   727  	vtxManager := state.NewSerializer(
   728  		state.SerializerConfig{
   729  			ChainID:     ctx.ChainID,
   730  			VM:          dagVM,
   731  			DB:          vertexDB,
   732  			Log:         ctx.Log,
   733  			CortinaTime: version.GetCortinaTime(ctx.NetworkID),
   734  		},
   735  	)
   736  
   737  	// The channel through which a VM may send messages to the consensus engine
   738  	// VM uses this channel to notify engine that a block is ready to be made
   739  	msgChan := make(chan common.Message, defaultChannelSize)
   740  
   741  	// The only difference between using avalancheMessageSender and
   742  	// snowmanMessageSender here is where the metrics will be placed. Because we
   743  	// end up using this sender after the linearization, we pass in
   744  	// snowmanMessageSender here.
   745  	err = dagVM.Initialize(
   746  		context.TODO(),
   747  		ctx.Context,
   748  		vmDB,
   749  		genesisData,
   750  		chainConfig.Upgrade,
   751  		chainConfig.Config,
   752  		msgChan,
   753  		fxs,
   754  		snowmanMessageSender,
   755  	)
   756  	if err != nil {
   757  		return nil, fmt.Errorf("error during vm's Initialize: %w", err)
   758  	}
   759  
   760  	// Initialize the ProposerVM and the vm wrapped inside it
   761  	var (
   762  		minBlockDelay       = proposervm.DefaultMinBlockDelay
   763  		numHistoricalBlocks = proposervm.DefaultNumHistoricalBlocks
   764  	)
   765  	if subnetCfg, ok := m.SubnetConfigs[ctx.SubnetID]; ok {
   766  		minBlockDelay = subnetCfg.ProposerMinBlockDelay
   767  		numHistoricalBlocks = subnetCfg.ProposerNumHistoricalBlocks
   768  	}
   769  	m.Log.Info("creating proposervm wrapper",
   770  		zap.Time("activationTime", m.ApricotPhase4Time),
   771  		zap.Uint64("minPChainHeight", m.ApricotPhase4MinPChainHeight),
   772  		zap.Duration("minBlockDelay", minBlockDelay),
   773  		zap.Uint64("numHistoricalBlocks", numHistoricalBlocks),
   774  	)
   775  
   776  	// Note: this does not use [dagVM] to ensure we use the [vm]'s height index.
   777  	untracedVMWrappedInsideProposerVM := NewLinearizeOnInitializeVM(vm)
   778  
   779  	var vmWrappedInsideProposerVM block.ChainVM = untracedVMWrappedInsideProposerVM
   780  	if m.TracingEnabled {
   781  		vmWrappedInsideProposerVM = tracedvm.NewBlockVM(vmWrappedInsideProposerVM, primaryAlias, m.Tracer)
   782  	}
   783  
   784  	proposervmReg, err := metrics.MakeAndRegister(
   785  		m.proposervmGatherer,
   786  		primaryAlias,
   787  	)
   788  	if err != nil {
   789  		return nil, err
   790  	}
   791  
   792  	// Note: vmWrappingProposerVM is the VM that the Snowman engines should be
   793  	// using.
   794  	var vmWrappingProposerVM block.ChainVM = proposervm.New(
   795  		vmWrappedInsideProposerVM,
   796  		proposervm.Config{
   797  			ActivationTime:      m.ApricotPhase4Time,
   798  			DurangoTime:         version.GetDurangoTime(m.NetworkID),
   799  			MinimumPChainHeight: m.ApricotPhase4MinPChainHeight,
   800  			MinBlkDelay:         minBlockDelay,
   801  			NumHistoricalBlocks: numHistoricalBlocks,
   802  			StakingLeafSigner:   m.StakingTLSSigner,
   803  			StakingCertLeaf:     m.StakingTLSCert,
   804  			Registerer:          proposervmReg,
   805  		},
   806  	)
   807  
   808  	if m.MeterVMEnabled {
   809  		meterchainvmReg, err := metrics.MakeAndRegister(
   810  			m.meterChainVMGatherer,
   811  			primaryAlias,
   812  		)
   813  		if err != nil {
   814  			return nil, err
   815  		}
   816  
   817  		vmWrappingProposerVM = metervm.NewBlockVM(vmWrappingProposerVM, meterchainvmReg)
   818  	}
   819  	if m.TracingEnabled {
   820  		vmWrappingProposerVM = tracedvm.NewBlockVM(vmWrappingProposerVM, "proposervm", m.Tracer)
   821  	}
   822  
   823  	// Note: linearizableVM is the VM that the Avalanche engines should be
   824  	// using.
   825  	linearizableVM := &initializeOnLinearizeVM{
   826  		DAGVM:          dagVM,
   827  		vmToInitialize: vmWrappingProposerVM,
   828  		vmToLinearize:  untracedVMWrappedInsideProposerVM,
   829  
   830  		ctx:          ctx.Context,
   831  		db:           vmDB,
   832  		genesisBytes: genesisData,
   833  		upgradeBytes: chainConfig.Upgrade,
   834  		configBytes:  chainConfig.Config,
   835  		toEngine:     msgChan,
   836  		fxs:          fxs,
   837  		appSender:    snowmanMessageSender,
   838  	}
   839  
   840  	bootstrapWeight, err := vdrs.TotalWeight(ctx.SubnetID)
   841  	if err != nil {
   842  		return nil, fmt.Errorf("error while fetching weight for subnet %s: %w", ctx.SubnetID, err)
   843  	}
   844  
   845  	consensusParams := sb.Config().ConsensusParameters
   846  	sampleK := consensusParams.K
   847  	if uint64(sampleK) > bootstrapWeight {
   848  		sampleK = int(bootstrapWeight)
   849  	}
   850  
   851  	stakeReg, err := metrics.MakeAndRegister(
   852  		m.stakeGatherer,
   853  		primaryAlias,
   854  	)
   855  	if err != nil {
   856  		return nil, err
   857  	}
   858  
   859  	connectedValidators, err := tracker.NewMeteredPeers(stakeReg)
   860  	if err != nil {
   861  		return nil, fmt.Errorf("error creating peer tracker: %w", err)
   862  	}
   863  	vdrs.RegisterSetCallbackListener(ctx.SubnetID, connectedValidators)
   864  
   865  	p2pReg, err := metrics.MakeAndRegister(
   866  		m.p2pGatherer,
   867  		primaryAlias,
   868  	)
   869  	if err != nil {
   870  		return nil, err
   871  	}
   872  
   873  	peerTracker, err := p2p.NewPeerTracker(
   874  		ctx.Log,
   875  		"peer_tracker",
   876  		p2pReg,
   877  		set.Of(ctx.NodeID),
   878  		nil,
   879  	)
   880  	if err != nil {
   881  		return nil, fmt.Errorf("error creating peer tracker: %w", err)
   882  	}
   883  
   884  	handlerReg, err := metrics.MakeAndRegister(
   885  		m.handlerGatherer,
   886  		primaryAlias,
   887  	)
   888  	if err != nil {
   889  		return nil, err
   890  	}
   891  
   892  	// Asynchronously passes messages from the network to the consensus engine
   893  	h, err := handler.New(
   894  		ctx,
   895  		vdrs,
   896  		msgChan,
   897  		m.FrontierPollFrequency,
   898  		m.ConsensusAppConcurrency,
   899  		m.ResourceTracker,
   900  		validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector
   901  		sb,
   902  		connectedValidators,
   903  		peerTracker,
   904  		handlerReg,
   905  	)
   906  	if err != nil {
   907  		return nil, fmt.Errorf("error initializing network handler: %w", err)
   908  	}
   909  
   910  	connectedBeacons := tracker.NewPeers()
   911  	startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4)
   912  	vdrs.RegisterSetCallbackListener(ctx.SubnetID, startupTracker)
   913  
   914  	snowGetHandler, err := snowgetter.New(
   915  		vmWrappingProposerVM,
   916  		snowmanMessageSender,
   917  		ctx.Log,
   918  		m.BootstrapMaxTimeGetAncestors,
   919  		m.BootstrapAncestorsMaxContainersSent,
   920  		ctx.Registerer,
   921  	)
   922  	if err != nil {
   923  		return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err)
   924  	}
   925  
   926  	var snowmanConsensus smcon.Consensus = &smcon.Topological{}
   927  	if m.TracingEnabled {
   928  		snowmanConsensus = smcon.Trace(snowmanConsensus, m.Tracer)
   929  	}
   930  
   931  	// Create engine, bootstrapper and state-syncer in this order,
   932  	// to make sure start callbacks are duly initialized
   933  	snowmanEngineConfig := smeng.Config{
   934  		Ctx:                 ctx,
   935  		AllGetsServer:       snowGetHandler,
   936  		VM:                  vmWrappingProposerVM,
   937  		Sender:              snowmanMessageSender,
   938  		Validators:          vdrs,
   939  		ConnectedValidators: connectedValidators,
   940  		Params:              consensusParams,
   941  		Consensus:           snowmanConsensus,
   942  	}
   943  	var snowmanEngine common.Engine
   944  	snowmanEngine, err = smeng.New(snowmanEngineConfig)
   945  	if err != nil {
   946  		return nil, fmt.Errorf("error initializing snowman engine: %w", err)
   947  	}
   948  
   949  	if m.TracingEnabled {
   950  		snowmanEngine = common.TraceEngine(snowmanEngine, m.Tracer)
   951  	}
   952  
   953  	// create bootstrap gear
   954  	bootstrapCfg := smbootstrap.Config{
   955  		AllGetsServer:                  snowGetHandler,
   956  		Ctx:                            ctx,
   957  		Beacons:                        vdrs,
   958  		SampleK:                        sampleK,
   959  		StartupTracker:                 startupTracker,
   960  		Sender:                         snowmanMessageSender,
   961  		BootstrapTracker:               sb,
   962  		Timer:                          h,
   963  		PeerTracker:                    peerTracker,
   964  		AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived,
   965  		DB:                             blockBootstrappingDB,
   966  		VM:                             vmWrappingProposerVM,
   967  	}
   968  	var snowmanBootstrapper common.BootstrapableEngine
   969  	snowmanBootstrapper, err = smbootstrap.New(
   970  		bootstrapCfg,
   971  		snowmanEngine.Start,
   972  	)
   973  	if err != nil {
   974  		return nil, fmt.Errorf("error initializing snowman bootstrapper: %w", err)
   975  	}
   976  
   977  	if m.TracingEnabled {
   978  		snowmanBootstrapper = common.TraceBootstrapableEngine(snowmanBootstrapper, m.Tracer)
   979  	}
   980  
   981  	avaGetHandler, err := avagetter.New(
   982  		vtxManager,
   983  		avalancheMessageSender,
   984  		ctx.Log,
   985  		m.BootstrapMaxTimeGetAncestors,
   986  		m.BootstrapAncestorsMaxContainersSent,
   987  		avalancheMetrics,
   988  	)
   989  	if err != nil {
   990  		return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err)
   991  	}
   992  
   993  	// create engine gear
   994  	avalancheEngine := aveng.New(ctx, avaGetHandler, linearizableVM)
   995  	if m.TracingEnabled {
   996  		avalancheEngine = common.TraceEngine(avalancheEngine, m.Tracer)
   997  	}
   998  
   999  	// create bootstrap gear
  1000  	avalancheBootstrapperConfig := avbootstrap.Config{
  1001  		AllGetsServer:                  avaGetHandler,
  1002  		Ctx:                            ctx,
  1003  		StartupTracker:                 startupTracker,
  1004  		Sender:                         avalancheMessageSender,
  1005  		PeerTracker:                    peerTracker,
  1006  		AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived,
  1007  		VtxBlocked:                     vtxBlocker,
  1008  		TxBlocked:                      txBlocker,
  1009  		Manager:                        vtxManager,
  1010  		VM:                             linearizableVM,
  1011  	}
  1012  	if ctx.ChainID == m.XChainID {
  1013  		avalancheBootstrapperConfig.StopVertexID = version.CortinaXChainStopVertexID[ctx.NetworkID]
  1014  	}
  1015  
  1016  	avalancheBootstrapper, err := avbootstrap.New(
  1017  		avalancheBootstrapperConfig,
  1018  		snowmanBootstrapper.Start,
  1019  		avalancheMetrics,
  1020  	)
  1021  	if err != nil {
  1022  		return nil, fmt.Errorf("error initializing avalanche bootstrapper: %w", err)
  1023  	}
  1024  
  1025  	if m.TracingEnabled {
  1026  		avalancheBootstrapper = common.TraceBootstrapableEngine(avalancheBootstrapper, m.Tracer)
  1027  	}
  1028  
  1029  	h.SetEngineManager(&handler.EngineManager{
  1030  		Avalanche: &handler.Engine{
  1031  			StateSyncer:  nil,
  1032  			Bootstrapper: avalancheBootstrapper,
  1033  			Consensus:    avalancheEngine,
  1034  		},
  1035  		Snowman: &handler.Engine{
  1036  			StateSyncer:  nil,
  1037  			Bootstrapper: snowmanBootstrapper,
  1038  			Consensus:    snowmanEngine,
  1039  		},
  1040  	})
  1041  
  1042  	// Register health check for this chain
  1043  	if err := m.Health.RegisterHealthCheck(primaryAlias, h, ctx.SubnetID.String()); err != nil {
  1044  		return nil, fmt.Errorf("couldn't add health check for chain %s: %w", primaryAlias, err)
  1045  	}
  1046  
  1047  	return &chain{
  1048  		Name:    primaryAlias,
  1049  		Context: ctx,
  1050  		VM:      dagVM,
  1051  		Handler: h,
  1052  	}, nil
  1053  }
  1054  
// Create a linear chain using the Snowman consensus engine.
//
// The chain's state is stored under a chain-specific prefix of the node's
// database. The provided [vm] is wrapped (tracing, proposervm, metering) and
// initialized here; the returned chain owns the message handler that routes
// network messages to the state syncer, bootstrapper, and consensus engine.
//
// If [m.validatorState] is nil this call is creating the P-Chain, and it
// additionally initializes the manager-wide validator state and the callback
// that unblocks the chain-creation loop once the P-Chain bootstraps.
func (m *manager) createSnowmanChain(
	ctx *snow.ConsensusContext,
	genesisData []byte,
	vdrs validators.Manager,
	beacons validators.Manager,
	vm block.ChainVM,
	fxs []*common.Fx,
	sb subnets.Subnet,
) (*chain, error) {
	ctx.Lock.Lock()
	defer ctx.Lock.Unlock()

	// Mark the chain as initializing with a Snowman engine so status
	// reporting reflects the correct engine type from the start.
	ctx.State.Set(snow.EngineState{
		Type:  p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
		State: snow.Initializing,
	})

	// The primary alias labels all of this chain's metric registries.
	primaryAlias := m.PrimaryAliasOrDefault(ctx.ChainID)
	meterDBReg, err := metrics.MakeAndRegister(
		m.MeterDBMetrics,
		primaryAlias,
	)
	if err != nil {
		return nil, err
	}

	meterDB, err := meterdb.New(meterDBReg, m.DB)
	if err != nil {
		return nil, err
	}

	// Partition the (metered) node database: everything for this chain lives
	// under the chain ID prefix, with separate sub-prefixes for VM state and
	// bootstrapping state.
	prefixDB := prefixdb.New(ctx.ChainID[:], meterDB)
	vmDB := prefixdb.New(VMDBPrefix, prefixDB)
	bootstrappingDB := prefixdb.New(ChainBootstrappingDBPrefix, prefixDB)

	// Passes messages from the consensus engine to the network.
	messageSender, err := sender.New(
		ctx,
		m.MsgCreator,
		m.Net,
		m.ManagerConfig.Router,
		m.TimeoutManager,
		p2ppb.EngineType_ENGINE_TYPE_SNOWMAN,
		sb,
		ctx.Registerer,
	)
	if err != nil {
		return nil, fmt.Errorf("couldn't initialize sender: %w", err)
	}

	if m.TracingEnabled {
		messageSender = sender.Trace(messageSender, m.Tracer)
	}

	var (
		bootstrapFunc   func()
		subnetConnector = validators.UnhandledSubnetConnector
	)
	// If [m.validatorState] is nil then we are creating the P-Chain. Since the
	// P-Chain is the first chain to be created, we can use it to initialize
	// required interfaces for the other chains.
	if m.validatorState == nil {
		valState, ok := vm.(validators.State)
		if !ok {
			return nil, fmt.Errorf("expected validators.State but got %T", vm)
		}

		if m.TracingEnabled {
			valState = validators.Trace(valState, "platformvm", m.Tracer)
		}

		// Notice that this context is left unlocked. This is because the
		// lock will already be held when accessing these values on the
		// P-chain.
		ctx.ValidatorState = valState

		// Initialize the validator state for future chains. The locked
		// wrapper acquires the P-chain's lock on each access.
		m.validatorState = validators.NewLockedState(&ctx.Lock, valState)
		if m.TracingEnabled {
			m.validatorState = validators.Trace(m.validatorState, "lockedState", m.Tracer)
		}

		if !m.ManagerConfig.SybilProtectionEnabled {
			m.validatorState = validators.NewNoValidatorsState(m.validatorState)
			ctx.ValidatorState = validators.NewNoValidatorsState(ctx.ValidatorState)
		}

		// Set this func only for platform
		//
		// The snowman bootstrapper ensures this function is only executed once, so
		// we don't need to be concerned about closing this channel multiple times.
		bootstrapFunc = func() {
			close(m.unblockChainCreatorCh)
		}

		// Set up the subnet connector for the P-Chain
		subnetConnector, ok = vm.(validators.SubnetConnector)
		if !ok {
			return nil, fmt.Errorf("expected validators.SubnetConnector but got %T", vm)
		}
	}

	// Initialize the ProposerVM and the vm wrapped inside it
	chainConfig, err := m.getChainConfig(ctx.ChainID)
	if err != nil {
		return nil, fmt.Errorf("error while fetching chain config: %w", err)
	}

	// Per-subnet overrides take precedence over the proposervm defaults.
	var (
		minBlockDelay       = proposervm.DefaultMinBlockDelay
		numHistoricalBlocks = proposervm.DefaultNumHistoricalBlocks
	)
	if subnetCfg, ok := m.SubnetConfigs[ctx.SubnetID]; ok {
		minBlockDelay = subnetCfg.ProposerMinBlockDelay
		numHistoricalBlocks = subnetCfg.ProposerNumHistoricalBlocks
	}
	m.Log.Info("creating proposervm wrapper",
		zap.Time("activationTime", m.ApricotPhase4Time),
		zap.Uint64("minPChainHeight", m.ApricotPhase4MinPChainHeight),
		zap.Duration("minBlockDelay", minBlockDelay),
		zap.Uint64("numHistoricalBlocks", numHistoricalBlocks),
	)

	if m.TracingEnabled {
		vm = tracedvm.NewBlockVM(vm, primaryAlias, m.Tracer)
	}

	proposervmReg, err := metrics.MakeAndRegister(
		m.proposervmGatherer,
		primaryAlias,
	)
	if err != nil {
		return nil, err
	}

	// Wrap the VM with the proposervm; subsequent wrappers (metering,
	// tracing) layer on top so the engines see the fully-wrapped VM.
	vm = proposervm.New(
		vm,
		proposervm.Config{
			ActivationTime:      m.ApricotPhase4Time,
			DurangoTime:         version.GetDurangoTime(m.NetworkID),
			MinimumPChainHeight: m.ApricotPhase4MinPChainHeight,
			MinBlkDelay:         minBlockDelay,
			NumHistoricalBlocks: numHistoricalBlocks,
			StakingLeafSigner:   m.StakingTLSSigner,
			StakingCertLeaf:     m.StakingTLSCert,
			Registerer:          proposervmReg,
		},
	)

	if m.MeterVMEnabled {
		meterchainvmReg, err := metrics.MakeAndRegister(
			m.meterChainVMGatherer,
			primaryAlias,
		)
		if err != nil {
			return nil, err
		}

		vm = metervm.NewBlockVM(vm, meterchainvmReg)
	}
	if m.TracingEnabled {
		vm = tracedvm.NewBlockVM(vm, "proposervm", m.Tracer)
	}

	// The channel through which a VM may send messages to the consensus engine
	// VM uses this channel to notify engine that a block is ready to be made
	msgChan := make(chan common.Message, defaultChannelSize)

	if err := vm.Initialize(
		context.TODO(),
		ctx.Context,
		vmDB,
		genesisData,
		chainConfig.Upgrade,
		chainConfig.Config,
		msgChan,
		fxs,
		messageSender,
	); err != nil {
		return nil, err
	}

	bootstrapWeight, err := beacons.TotalWeight(ctx.SubnetID)
	if err != nil {
		return nil, fmt.Errorf("error while fetching weight for subnet %s: %w", ctx.SubnetID, err)
	}

	// Clamp the sample size to the total beacon weight so sampling is
	// well-defined on small networks.
	consensusParams := sb.Config().ConsensusParameters
	sampleK := consensusParams.K
	if uint64(sampleK) > bootstrapWeight {
		sampleK = int(bootstrapWeight)
	}

	stakeReg, err := metrics.MakeAndRegister(
		m.stakeGatherer,
		primaryAlias,
	)
	if err != nil {
		return nil, err
	}

	// Tracks which of this subnet's validators are currently connected.
	connectedValidators, err := tracker.NewMeteredPeers(stakeReg)
	if err != nil {
		return nil, fmt.Errorf("error creating peer tracker: %w", err)
	}
	vdrs.RegisterSetCallbackListener(ctx.SubnetID, connectedValidators)

	p2pReg, err := metrics.MakeAndRegister(
		m.p2pGatherer,
		primaryAlias,
	)
	if err != nil {
		return nil, err
	}

	// Our own node ID is excluded from peer tracking.
	peerTracker, err := p2p.NewPeerTracker(
		ctx.Log,
		"peer_tracker",
		p2pReg,
		set.Of(ctx.NodeID),
		nil,
	)
	if err != nil {
		return nil, fmt.Errorf("error creating peer tracker: %w", err)
	}

	handlerReg, err := metrics.MakeAndRegister(
		m.handlerGatherer,
		primaryAlias,
	)
	if err != nil {
		return nil, err
	}

	// Asynchronously passes messages from the network to the consensus engine
	h, err := handler.New(
		ctx,
		vdrs,
		msgChan,
		m.FrontierPollFrequency,
		m.ConsensusAppConcurrency,
		m.ResourceTracker,
		subnetConnector,
		sb,
		connectedValidators,
		peerTracker,
		handlerReg,
	)
	if err != nil {
		return nil, fmt.Errorf("couldn't initialize message handler: %w", err)
	}

	// Startup requires >=75% of bootstrap weight to be connected; the
	// (3w+3)/4 form rounds up without floating point.
	connectedBeacons := tracker.NewPeers()
	startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4)
	beacons.RegisterSetCallbackListener(ctx.SubnetID, startupTracker)

	// Serves Get/GetAncestors requests from other nodes.
	snowGetHandler, err := snowgetter.New(
		vm,
		messageSender,
		ctx.Log,
		m.BootstrapMaxTimeGetAncestors,
		m.BootstrapAncestorsMaxContainersSent,
		ctx.Registerer,
	)
	if err != nil {
		return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err)
	}

	var consensus smcon.Consensus = &smcon.Topological{}
	if m.TracingEnabled {
		consensus = smcon.Trace(consensus, m.Tracer)
	}

	// Create engine, bootstrapper and state-syncer in this order,
	// to make sure start callbacks are duly initialized
	engineConfig := smeng.Config{
		Ctx:                 ctx,
		AllGetsServer:       snowGetHandler,
		VM:                  vm,
		Sender:              messageSender,
		Validators:          vdrs,
		ConnectedValidators: connectedValidators,
		Params:              consensusParams,
		Consensus:           consensus,
		// Partial sync is only supported on the P-chain.
		PartialSync:         m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID,
	}
	var engine common.Engine
	engine, err = smeng.New(engineConfig)
	if err != nil {
		return nil, fmt.Errorf("error initializing snowman engine: %w", err)
	}

	if m.TracingEnabled {
		engine = common.TraceEngine(engine, m.Tracer)
	}

	// create bootstrap gear; when bootstrapping finishes it starts the
	// engine (and, on the P-chain, runs [bootstrapFunc]).
	bootstrapCfg := smbootstrap.Config{
		AllGetsServer:                  snowGetHandler,
		Ctx:                            ctx,
		Beacons:                        beacons,
		SampleK:                        sampleK,
		StartupTracker:                 startupTracker,
		Sender:                         messageSender,
		BootstrapTracker:               sb,
		Timer:                          h,
		PeerTracker:                    peerTracker,
		AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived,
		DB:                             bootstrappingDB,
		VM:                             vm,
		Bootstrapped:                   bootstrapFunc,
	}
	var bootstrapper common.BootstrapableEngine
	bootstrapper, err = smbootstrap.New(
		bootstrapCfg,
		engine.Start,
	)
	if err != nil {
		return nil, fmt.Errorf("error initializing snowman bootstrapper: %w", err)
	}

	if m.TracingEnabled {
		bootstrapper = common.TraceBootstrapableEngine(bootstrapper, m.Tracer)
	}

	// create state sync gear; when state sync finishes it starts the
	// bootstrapper.
	stateSyncCfg, err := syncer.NewConfig(
		snowGetHandler,
		ctx,
		startupTracker,
		messageSender,
		beacons,
		sampleK,
		bootstrapWeight/2+1, // must be > 50%
		m.StateSyncBeacons,
		vm,
	)
	if err != nil {
		return nil, fmt.Errorf("couldn't initialize state syncer configuration: %w", err)
	}
	stateSyncer := syncer.New(
		stateSyncCfg,
		bootstrapper.Start,
	)

	if m.TracingEnabled {
		stateSyncer = common.TraceStateSyncer(stateSyncer, m.Tracer)
	}

	// Snowman chains have no Avalanche engines.
	h.SetEngineManager(&handler.EngineManager{
		Avalanche: nil,
		Snowman: &handler.Engine{
			StateSyncer:  stateSyncer,
			Bootstrapper: bootstrapper,
			Consensus:    engine,
		},
	})

	// Register health checks
	if err := m.Health.RegisterHealthCheck(primaryAlias, h, ctx.SubnetID.String()); err != nil {
		return nil, fmt.Errorf("couldn't add health check for chain %s: %w", primaryAlias, err)
	}

	return &chain{
		Name:    primaryAlias,
		Context: ctx,
		VM:      vm,
		Handler: h,
	}, nil
}
  1426  
  1427  func (m *manager) IsBootstrapped(id ids.ID) bool {
  1428  	m.chainsLock.Lock()
  1429  	chain, exists := m.chains[id]
  1430  	m.chainsLock.Unlock()
  1431  	if !exists {
  1432  		return false
  1433  	}
  1434  
  1435  	return chain.Context().State.Get().State == snow.NormalOp
  1436  }
  1437  
  1438  func (m *manager) registerBootstrappedHealthChecks() error {
  1439  	bootstrappedCheck := health.CheckerFunc(func(context.Context) (interface{}, error) {
  1440  		if subnetIDs := m.Subnets.Bootstrapping(); len(subnetIDs) != 0 {
  1441  			return subnetIDs, errNotBootstrapped
  1442  		}
  1443  		return []ids.ID{}, nil
  1444  	})
  1445  	if err := m.Health.RegisterReadinessCheck("bootstrapped", bootstrappedCheck, health.ApplicationTag); err != nil {
  1446  		return fmt.Errorf("couldn't register bootstrapped readiness check: %w", err)
  1447  	}
  1448  	if err := m.Health.RegisterHealthCheck("bootstrapped", bootstrappedCheck, health.ApplicationTag); err != nil {
  1449  		return fmt.Errorf("couldn't register bootstrapped health check: %w", err)
  1450  	}
  1451  
  1452  	// We should only report unhealthy if the node is partially syncing the
  1453  	// primary network and is a validator.
  1454  	if !m.PartialSyncPrimaryNetwork {
  1455  		return nil
  1456  	}
  1457  
  1458  	partialSyncCheck := health.CheckerFunc(func(context.Context) (interface{}, error) {
  1459  		// Note: The health check is skipped during bootstrapping to allow a
  1460  		// node to sync the network even if it was previously a validator.
  1461  		if !m.IsBootstrapped(constants.PlatformChainID) {
  1462  			return "node is currently bootstrapping", nil
  1463  		}
  1464  		if _, ok := m.Validators.GetValidator(constants.PrimaryNetworkID, m.NodeID); !ok {
  1465  			return "node is not a primary network validator", nil
  1466  		}
  1467  
  1468  		m.Log.Warn("node is a primary network validator",
  1469  			zap.Error(errPartialSyncAsAValidator),
  1470  		)
  1471  		return "node is a primary network validator", errPartialSyncAsAValidator
  1472  	})
  1473  
  1474  	if err := m.Health.RegisterHealthCheck("validation", partialSyncCheck, health.ApplicationTag); err != nil {
  1475  		return fmt.Errorf("couldn't register validation health check: %w", err)
  1476  	}
  1477  	return nil
  1478  }
  1479  
  1480  // Starts chain creation loop to process queued chains
  1481  func (m *manager) StartChainCreator(platformParams ChainParameters) error {
  1482  	// Add the P-Chain to the Primary Network
  1483  	sb, _ := m.Subnets.GetOrCreate(constants.PrimaryNetworkID)
  1484  	sb.AddChain(platformParams.ID)
  1485  
  1486  	// The P-chain is created synchronously to ensure that `VM.Initialize` has
  1487  	// finished before returning from this function. This is required because
  1488  	// the P-chain initializes state that the rest of the node initialization
  1489  	// depends on.
  1490  	m.createChain(platformParams)
  1491  
  1492  	m.Log.Info("starting chain creator")
  1493  	m.chainCreatorExited.Add(1)
  1494  	go m.dispatchChainCreator()
  1495  	return nil
  1496  }
  1497  
  1498  func (m *manager) dispatchChainCreator() {
  1499  	defer m.chainCreatorExited.Done()
  1500  
  1501  	select {
  1502  	// This channel will be closed when Shutdown is called on the manager.
  1503  	case <-m.chainCreatorShutdownCh:
  1504  		return
  1505  	case <-m.unblockChainCreatorCh:
  1506  	}
  1507  
  1508  	// Handle chain creations
  1509  	for {
  1510  		// Get the next chain we should create.
  1511  		// Dequeue waits until an element is pushed, so this is not
  1512  		// busy-looping.
  1513  		chainParams, ok := m.chainsQueue.PopLeft()
  1514  		if !ok { // queue is closed, return directly
  1515  			return
  1516  		}
  1517  		m.createChain(chainParams)
  1518  	}
  1519  }
  1520  
// Shutdown stops all the chains.
//
// The order matters: closing the queue and the shutdown channel stops the
// chain-creator goroutine (whether it is waiting for the P-chain or draining
// the queue), we then wait for it to exit before shutting down the router.
func (m *manager) Shutdown() {
	m.Log.Info("shutting down chain manager")
	m.chainsQueue.Close()
	close(m.chainCreatorShutdownCh)
	m.chainCreatorExited.Wait()
	m.ManagerConfig.Router.Shutdown(context.TODO())
}
  1529  
// LookupVM returns the ID of the VM associated with an alias. The lookup is
// delegated entirely to the VM manager.
func (m *manager) LookupVM(alias string) (ids.ID, error) {
	return m.VMManager.Lookup(alias)
}
  1534  
  1535  // Notify registrants [those who want to know about the creation of chains]
  1536  // that the specified chain has been created
  1537  func (m *manager) notifyRegistrants(name string, ctx *snow.ConsensusContext, vm common.VM) {
  1538  	for _, registrant := range m.registrants {
  1539  		registrant.RegisterChain(name, ctx, vm)
  1540  	}
  1541  }
  1542  
  1543  // getChainConfig returns value of a entry by looking at ID key and alias key
  1544  // it first searches ID key, then falls back to it's corresponding primary alias
  1545  func (m *manager) getChainConfig(id ids.ID) (ChainConfig, error) {
  1546  	if val, ok := m.ManagerConfig.ChainConfigs[id.String()]; ok {
  1547  		return val, nil
  1548  	}
  1549  	aliases, err := m.Aliases(id)
  1550  	if err != nil {
  1551  		return ChainConfig{}, err
  1552  	}
  1553  	for _, alias := range aliases {
  1554  		if val, ok := m.ManagerConfig.ChainConfigs[alias]; ok {
  1555  			return val, nil
  1556  		}
  1557  	}
  1558  
  1559  	return ChainConfig{}, nil
  1560  }
  1561  
  1562  func (m *manager) getOrMakeVMRegisterer(vmID ids.ID, chainAlias string) (metrics.MultiGatherer, error) {
  1563  	vmGatherer, ok := m.vmGatherer[vmID]
  1564  	if !ok {
  1565  		vmName := constants.VMName(vmID)
  1566  		vmNamespace := metric.AppendNamespace(constants.PlatformName, vmName)
  1567  		vmGatherer = metrics.NewLabelGatherer(ChainLabel)
  1568  		err := m.Metrics.Register(
  1569  			vmNamespace,
  1570  			vmGatherer,
  1571  		)
  1572  		if err != nil {
  1573  			return nil, err
  1574  		}
  1575  		m.vmGatherer[vmID] = vmGatherer
  1576  	}
  1577  
  1578  	chainReg := metrics.NewPrefixGatherer()
  1579  	err := vmGatherer.Register(
  1580  		chainAlias,
  1581  		chainReg,
  1582  	)
  1583  	return chainReg, err
  1584  }