github.com/kaituanwang/hyperledger@v2.0.1+incompatible/orderer/common/server/main.go (about)

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package server
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"io/ioutil"
    14  	"net"
    15  	"net/http"
     16  	_ "net/http/pprof" // registers pprof handlers on http.DefaultServeMux for the profiling service
    17  	"os"
    18  	"os/signal"
    19  	"sync"
    20  	"syscall"
    21  	"time"
    22  
    23  	"github.com/golang/protobuf/proto"
    24  	"github.com/hyperledger/fabric-lib-go/healthz"
    25  	cb "github.com/hyperledger/fabric-protos-go/common"
    26  	ab "github.com/hyperledger/fabric-protos-go/orderer"
    27  	"github.com/hyperledger/fabric/bccsp"
    28  	"github.com/hyperledger/fabric/bccsp/factory"
    29  	"github.com/hyperledger/fabric/common/channelconfig"
    30  	"github.com/hyperledger/fabric/common/crypto"
    31  	"github.com/hyperledger/fabric/common/flogging"
    32  	floggingmetrics "github.com/hyperledger/fabric/common/flogging/metrics"
    33  	"github.com/hyperledger/fabric/common/grpclogging"
    34  	"github.com/hyperledger/fabric/common/grpcmetrics"
    35  	"github.com/hyperledger/fabric/common/ledger/blockledger"
    36  	"github.com/hyperledger/fabric/common/metrics"
    37  	"github.com/hyperledger/fabric/common/metrics/disabled"
    38  	"github.com/hyperledger/fabric/common/tools/protolator"
    39  	"github.com/hyperledger/fabric/core/comm"
    40  	"github.com/hyperledger/fabric/core/operations"
    41  	"github.com/hyperledger/fabric/internal/pkg/identity"
    42  	"github.com/hyperledger/fabric/msp"
    43  	"github.com/hyperledger/fabric/orderer/common/bootstrap/file"
    44  	"github.com/hyperledger/fabric/orderer/common/cluster"
    45  	"github.com/hyperledger/fabric/orderer/common/localconfig"
    46  	"github.com/hyperledger/fabric/orderer/common/metadata"
    47  	"github.com/hyperledger/fabric/orderer/common/multichannel"
    48  	"github.com/hyperledger/fabric/orderer/consensus"
    49  	"github.com/hyperledger/fabric/orderer/consensus/etcdraft"
    50  	"github.com/hyperledger/fabric/orderer/consensus/kafka"
    51  	"github.com/hyperledger/fabric/orderer/consensus/solo"
    52  	"github.com/hyperledger/fabric/protoutil"
    53  	"go.uber.org/zap/zapcore"
    54  	"google.golang.org/grpc"
    55  	"gopkg.in/alecthomas/kingpin.v2"
    56  )
    57  
    58  var logger = flogging.MustGetLogger("orderer.common.server")
    59  
     60  // command line flags
    61  var (
    62  	app = kingpin.New("orderer", "Hyperledger Fabric orderer node")
    63  
     64  	_       = app.Command("start", "Start the orderer node").Default() // preserved for CLI compatibility
    65  	version = app.Command("version", "Show version information")
    66  
    67  	clusterTypes = map[string]struct{}{"etcdraft": {}}
    68  )
    69  
     70  // Main is the entry point of the orderer process.
    71  func Main() {
    72  	fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
    73  
    74  	// "version" command
    75  	if fullCmd == version.FullCommand() {
    76  		fmt.Println(metadata.GetVersionInfo())
    77  		return
    78  	}
    79  
    80  	conf, err := localconfig.Load()
    81  	if err != nil {
    82  		logger.Error("failed to parse config: ", err)
    83  		os.Exit(1)
    84  	}
    85  	initializeLogging()
    86  
    87  	prettyPrintStruct(conf)
    88  
    89  	cryptoProvider := factory.GetDefault()
    90  
    91  	signer, signErr := loadLocalMSP(conf).GetDefaultSigningIdentity()
    92  	if signErr != nil {
    93  		logger.Panicf("Failed to get local MSP identity: %s", signErr)
    94  	}
    95  
    96  	opsSystem := newOperationsSystem(conf.Operations, conf.Metrics)
    97  	if err = opsSystem.Start(); err != nil {
    98  		logger.Panicf("failed to initialize operations subsystem: %s", err)
    99  	}
   100  	defer opsSystem.Stop()
   101  	metricsProvider := opsSystem.Provider
   102  	logObserver := floggingmetrics.NewObserver(metricsProvider)
   103  	flogging.SetObserver(logObserver)
   104  
   105  	serverConfig := initializeServerConfig(conf, metricsProvider)
   106  	grpcServer := initializeGrpcServer(conf, serverConfig)
   107  	caMgr := &caManager{
   108  		appRootCAsByChain:     make(map[string][][]byte),
   109  		ordererRootCAsByChain: make(map[string][][]byte),
   110  		clientRootCAs:         serverConfig.SecOpts.ClientRootCAs,
   111  	}
   112  
   113  	lf, _, err := createLedgerFactory(conf, metricsProvider)
   114  	if err != nil {
   115  		logger.Panicf("Failed to create ledger factory: %v", err)
   116  	}
   117  
   118  	var clusterBootBlock *cb.Block
    119  	// The following artifacts are configured properly only when the orderer is of cluster type.
   120  	var r *replicationInitiator
   121  	clusterServerConfig := serverConfig
   122  	clusterGRPCServer := grpcServer // by default, cluster shares the same grpc server
   123  	var clusterClientConfig comm.ClientConfig
   124  	var clusterDialer *cluster.PredicateDialer
   125  	var clusterType, reuseGrpcListener bool
   126  	var serversToUpdate []*comm.GRPCServer
   127  	if conf.General.BootstrapMethod == "file" {
   128  		bootstrapBlock := extractBootstrapBlock(conf)
   129  		if err := ValidateBootstrapBlock(bootstrapBlock, cryptoProvider); err != nil {
   130  			logger.Panicf("Failed validating bootstrap block: %v", err)
   131  		}
   132  		sysChanLastConfigBlock := extractSysChanLastConfig(lf, bootstrapBlock)
   133  		clusterBootBlock = selectClusterBootBlock(bootstrapBlock, sysChanLastConfigBlock)
   134  
   135  		typ := consensusType(bootstrapBlock, cryptoProvider)
   136  		clusterType = isClusterType(clusterBootBlock, cryptoProvider)
   137  		if clusterType {
   138  			logger.Infof("Setting up cluster for orderer type %s", typ)
   139  			clusterClientConfig = initializeClusterClientConfig(conf)
   140  			clusterDialer = &cluster.PredicateDialer{
   141  				Config: clusterClientConfig,
   142  			}
   143  
   144  			r = createReplicator(lf, bootstrapBlock, conf, clusterClientConfig.SecOpts, signer, cryptoProvider)
   145  			// Only clusters that are equipped with a recent config block can replicate.
   146  			if conf.General.BootstrapMethod == "file" {
   147  				r.replicateIfNeeded(bootstrapBlock)
   148  			}
   149  
   150  			if reuseGrpcListener = reuseListener(conf, typ); !reuseGrpcListener {
   151  				clusterServerConfig, clusterGRPCServer = configureClusterListener(conf, serverConfig, ioutil.ReadFile)
   152  			}
   153  
   154  			// If we have a separate gRPC server for the cluster,
   155  			// we need to update its TLS CA certificate pool.
   156  			serversToUpdate = append(serversToUpdate, clusterGRPCServer)
   157  		}
   158  		// Are we bootstrapping?
   159  		if len(lf.ChannelIDs()) == 0 {
   160  			initializeBootstrapChannel(clusterBootBlock, lf)
   161  		} else {
   162  			logger.Info("Not bootstrapping because of existing channels")
   163  		}
   164  
   165  	}
   166  
   167  	identityBytes, err := signer.Serialize()
   168  	if err != nil {
   169  		logger.Panicf("Failed serializing signing identity: %v", err)
   170  	}
   171  
   172  	expirationLogger := flogging.MustGetLogger("certmonitor")
   173  	crypto.TrackExpiration(
   174  		serverConfig.SecOpts.UseTLS,
   175  		serverConfig.SecOpts.Certificate,
   176  		[][]byte{clusterClientConfig.SecOpts.Certificate},
   177  		identityBytes,
   178  		expirationLogger.Warnf, // This can be used to piggyback a metric event in the future
   179  		time.Now(),
   180  		time.AfterFunc)
   181  
    182  	// If the cluster is reusing the client-facing server, then it has already
    183  	// been appended to serversToUpdate at this point.
   184  	if grpcServer.MutualTLSRequired() && !reuseGrpcListener {
   185  		serversToUpdate = append(serversToUpdate, grpcServer)
   186  	}
   187  
   188  	tlsCallback := func(bundle *channelconfig.Bundle) {
   189  		logger.Debug("Executing callback to update root CAs")
   190  		caMgr.updateTrustedRoots(bundle, serversToUpdate...)
   191  		if clusterType {
   192  			caMgr.updateClusterDialer(
   193  				clusterDialer,
   194  				clusterClientConfig.SecOpts.ServerRootCAs,
   195  			)
   196  		}
   197  	}
   198  
   199  	manager := initializeMultichannelRegistrar(
   200  		clusterBootBlock,
   201  		r,
   202  		clusterDialer,
   203  		clusterServerConfig,
   204  		clusterGRPCServer,
   205  		conf,
   206  		signer,
   207  		metricsProvider,
   208  		opsSystem,
   209  		lf,
   210  		cryptoProvider,
   211  		tlsCallback,
   212  	)
   213  
   214  	mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
   215  	server := NewServer(
   216  		manager,
   217  		metricsProvider,
   218  		&conf.Debug,
   219  		conf.General.Authentication.TimeWindow,
   220  		mutualTLS,
   221  		conf.General.Authentication.NoExpirationChecks,
   222  	)
   223  
   224  	logger.Infof("Starting %s", metadata.GetVersionInfo())
   225  	go handleSignals(addPlatformSignals(map[os.Signal]func(){
   226  		syscall.SIGTERM: func() {
   227  			grpcServer.Stop()
   228  			if clusterGRPCServer != grpcServer {
   229  				clusterGRPCServer.Stop()
   230  			}
   231  		},
   232  	}))
   233  
   234  	if !reuseGrpcListener && clusterType {
   235  		logger.Info("Starting cluster listener on", clusterGRPCServer.Address())
   236  		go clusterGRPCServer.Start()
   237  	}
   238  
   239  	if conf.General.Profile.Enabled {
   240  		go initializeProfilingService(conf)
   241  	}
   242  	ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)
   243  	logger.Info("Beginning to serve requests")
   244  	grpcServer.Start()
   245  }
   246  
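         // reuseListener reports whether the cluster service should reuse the node's
         // general listener: true when no dedicated cluster listener is configured
         // (panicking if TLS is disabled), false when a complete cluster listener
         // configuration is given.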
   247  func reuseListener(conf *localconfig.TopLevel, typ string) bool {
   248  	clusterConf := conf.General.Cluster
    249  	// If the listen address/port and the TLS certificate/key are all unconfigured,
    250  	// it means we use the general listener of the node.
   251  	if clusterConf.ListenPort == 0 && clusterConf.ServerCertificate == "" && clusterConf.ListenAddress == "" && clusterConf.ServerPrivateKey == "" {
   252  		logger.Info("Cluster listener is not configured, defaulting to use the general listener on port", conf.General.ListenPort)
   253  
   254  		if !conf.General.TLS.Enabled {
   255  			logger.Panicf("TLS is required for running ordering nodes of type %s.", typ)
   256  		}
   257  
   258  		return true
   259  	}
   260  
    261  	// Otherwise, at least one of the above is defined, so all four properties must be defined.
   262  	if clusterConf.ListenPort == 0 || clusterConf.ServerCertificate == "" || clusterConf.ListenAddress == "" || clusterConf.ServerPrivateKey == "" {
   263  		logger.Panic("Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, General.Cluster.ServerCertificate," +
    264  			" General.Cluster.ServerPrivateKey, must be defined together.")
   265  	}
   266  
   267  	return false
   268  }
   269  
    270  // extractSysChanLastConfig returns the last config block of the system channel, or nil if there are no existing channels (i.e. we are bootstrapping).
   271  func extractSysChanLastConfig(lf blockledger.Factory, bootstrapBlock *cb.Block) *cb.Block {
   272  	// Are we bootstrapping?
   273  	channelCount := len(lf.ChannelIDs())
   274  	if channelCount == 0 {
   275  		logger.Info("Bootstrapping because no existing channels")
   276  		return nil
   277  	}
   278  	logger.Infof("Not bootstrapping because of %d existing channels", channelCount)
   279  
   280  	systemChannelName, err := protoutil.GetChainIDFromBlock(bootstrapBlock)
   281  	if err != nil {
   282  		logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
   283  	}
   284  	systemChannelLedger, err := lf.GetOrCreate(systemChannelName)
   285  	if err != nil {
   286  		logger.Panicf("Failed getting system channel ledger: %v", err)
   287  	}
   288  	height := systemChannelLedger.Height()
   289  	lastConfigBlock := multichannel.ConfigBlock(systemChannelLedger)
   290  	logger.Infof("System channel: name=%s, height=%d, last config block number=%d",
   291  		systemChannelName, height, lastConfigBlock.Header.Number)
   292  	return lastConfigBlock
   293  }
   294  
    295  // selectClusterBootBlock returns whichever of the bootstrap (genesis) block and the system channel's last config block has the higher block number.
   296  func selectClusterBootBlock(bootstrapBlock, sysChanLastConfig *cb.Block) *cb.Block {
   297  	if sysChanLastConfig == nil {
   298  		logger.Debug("Selected bootstrap block, because system channel last config block is nil")
   299  		return bootstrapBlock
   300  	}
   301  
   302  	if sysChanLastConfig.Header.Number > bootstrapBlock.Header.Number {
   303  		logger.Infof("Cluster boot block is system channel last config block; Blocks Header.Number system-channel=%d, bootstrap=%d",
   304  			sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
   305  		return sysChanLastConfig
   306  	}
   307  
   308  	logger.Infof("Cluster boot block is bootstrap (genesis) block; Blocks Header.Number system-channel=%d, bootstrap=%d",
   309  		sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
   310  	return bootstrapBlock
   311  }
   312  
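         // createReplicator builds the replicationInitiator used to pull channel blocks
         // from other ordering nodes. The system channel is given a no-op block verifier,
         // since the bootstrap block is trusted and backward hash-chain verification is used.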
   313  func createReplicator(
   314  	lf blockledger.Factory,
   315  	bootstrapBlock *cb.Block,
   316  	conf *localconfig.TopLevel,
   317  	secOpts comm.SecureOptions,
   318  	signer identity.SignerSerializer,
   319  	bccsp bccsp.BCCSP,
   320  ) *replicationInitiator {
   321  	logger := flogging.MustGetLogger("orderer.common.cluster")
   322  
   323  	vl := &verifierLoader{
   324  		verifierFactory: &cluster.BlockVerifierAssembler{Logger: logger, BCCSP: bccsp},
   325  		onFailure: func(block *cb.Block) {
   326  			protolator.DeepMarshalJSON(os.Stdout, block)
   327  		},
   328  		ledgerFactory: lf,
   329  		logger:        logger,
   330  	}
   331  
   332  	systemChannelName, err := protoutil.GetChainIDFromBlock(bootstrapBlock)
   333  	if err != nil {
   334  		logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
   335  	}
   336  
   337  	// System channel is not verified because we trust the bootstrap block
   338  	// and use backward hash chain verification.
   339  	verifiersByChannel := vl.loadVerifiers()
   340  	verifiersByChannel[systemChannelName] = &cluster.NoopBlockVerifier{}
   341  
   342  	vr := &cluster.VerificationRegistry{
   343  		LoadVerifier:       vl.loadVerifier,
   344  		Logger:             logger,
   345  		VerifiersByChannel: verifiersByChannel,
   346  		VerifierFactory:    &cluster.BlockVerifierAssembler{Logger: logger, BCCSP: bccsp},
   347  	}
   348  
   349  	ledgerFactory := &ledgerFactory{
   350  		Factory:       lf,
   351  		onBlockCommit: vr.BlockCommitted,
   352  	}
   353  	return &replicationInitiator{
   354  		registerChain:     vr.RegisterVerifier,
   355  		verifierRetriever: vr,
   356  		logger:            logger,
   357  		secOpts:           secOpts,
   358  		conf:              conf,
   359  		lf:                ledgerFactory,
   360  		signer:            signer,
   361  		cryptoProvider:    bccsp,
   362  	}
   363  }
   364  
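         // initializeLogging configures flogging from the FABRIC_LOGGING_SPEC and
         // FABRIC_LOGGING_FORMAT environment variables, writing to stderr.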
   365  func initializeLogging() {
   366  	loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
   367  	loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
   368  	flogging.Init(flogging.Config{
   369  		Format:  loggingFormat,
   370  		Writer:  os.Stderr,
   371  		LogSpec: loggingSpec,
   372  	})
   373  }
   374  
    375  // initializeProfilingService starts the Go pprof profiling HTTP service.
   376  func initializeProfilingService(conf *localconfig.TopLevel) {
   377  	logger.Info("Starting Go pprof profiling service on:", conf.General.Profile.Address)
   378  	// The ListenAndServe() call does not return unless an error occurs.
   379  	logger.Panic("Go pprof service failed:", http.ListenAndServe(conf.General.Profile.Address, nil))
   380  }
   381  
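         // handleSignals subscribes to the mapped signals and blocks forever,
         // invoking the corresponding handler for each signal received.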
   382  func handleSignals(handlers map[os.Signal]func()) {
   383  	var signals []os.Signal
   384  	for sig := range handlers {
   385  		signals = append(signals, sig)
   386  	}
   387  
   388  	signalChan := make(chan os.Signal, 1)
   389  	signal.Notify(signalChan, signals...)
   390  
   391  	for sig := range signalChan {
   392  		logger.Infof("Received signal: %d (%s)", sig, sig)
   393  		handlers[sig]()
   394  	}
   395  }
   396  
   397  type loadPEMFunc func(string) ([]byte, error)
   398  
   399  // configureClusterListener returns a new ServerConfig and a new gRPC server (with its own TLS listener).
   400  func configureClusterListener(conf *localconfig.TopLevel, generalConf comm.ServerConfig, loadPEM loadPEMFunc) (comm.ServerConfig, *comm.GRPCServer) {
   401  	clusterConf := conf.General.Cluster
   402  
   403  	cert, err := loadPEM(clusterConf.ServerCertificate)
   404  	if err != nil {
   405  		logger.Panicf("Failed to load cluster server certificate from '%s' (%s)", clusterConf.ServerCertificate, err)
   406  	}
   407  
   408  	key, err := loadPEM(clusterConf.ServerPrivateKey)
   409  	if err != nil {
   410  		logger.Panicf("Failed to load cluster server key from '%s' (%s)", clusterConf.ServerPrivateKey, err)
   411  	}
   412  
   413  	port := fmt.Sprintf("%d", clusterConf.ListenPort)
   414  	bindAddr := net.JoinHostPort(clusterConf.ListenAddress, port)
   415  
   416  	var clientRootCAs [][]byte
   417  	for _, serverRoot := range conf.General.Cluster.RootCAs {
   418  		rootCACert, err := loadPEM(serverRoot)
   419  		if err != nil {
   420  			logger.Panicf("Failed to load CA cert file '%s' (%s)", serverRoot, err)
   421  		}
   422  		clientRootCAs = append(clientRootCAs, rootCACert)
   423  	}
   424  
   425  	serverConf := comm.ServerConfig{
   426  		StreamInterceptors: generalConf.StreamInterceptors,
   427  		UnaryInterceptors:  generalConf.UnaryInterceptors,
   428  		ConnectionTimeout:  generalConf.ConnectionTimeout,
   429  		ServerStatsHandler: generalConf.ServerStatsHandler,
   430  		Logger:             generalConf.Logger,
   431  		KaOpts:             generalConf.KaOpts,
   432  		SecOpts: comm.SecureOptions{
   433  			TimeShift:         conf.General.Cluster.TLSHandshakeTimeShift,
   434  			CipherSuites:      comm.DefaultTLSCipherSuites,
   435  			ClientRootCAs:     clientRootCAs,
   436  			RequireClientCert: true,
   437  			Certificate:       cert,
   438  			UseTLS:            true,
   439  			Key:               key,
   440  		},
   441  	}
   442  
   443  	srv, err := comm.NewGRPCServer(bindAddr, serverConf)
   444  	if err != nil {
   445  		logger.Panicf("Failed creating gRPC server on %s:%d due to %v", clusterConf.ListenAddress, clusterConf.ListenPort, err)
   446  	}
   447  
   448  	return serverConf, srv
   449  }
   450  
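         // initializeClusterClientConfig builds the gRPC client configuration used for
         // intra-cluster communication; mutual TLS material is loaded only when a
         // client certificate is configured.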
   451  func initializeClusterClientConfig(conf *localconfig.TopLevel) comm.ClientConfig {
   452  	cc := comm.ClientConfig{
   453  		AsyncConnect: true,
   454  		KaOpts:       comm.DefaultKeepaliveOptions,
   455  		Timeout:      conf.General.Cluster.DialTimeout,
   456  		SecOpts:      comm.SecureOptions{},
   457  	}
   458  
   459  	if conf.General.Cluster.ClientCertificate == "" {
   460  		return cc
   461  	}
   462  
   463  	certFile := conf.General.Cluster.ClientCertificate
   464  	certBytes, err := ioutil.ReadFile(certFile)
   465  	if err != nil {
   466  		logger.Fatalf("Failed to load client TLS certificate file '%s' (%s)", certFile, err)
   467  	}
   468  
   469  	keyFile := conf.General.Cluster.ClientPrivateKey
   470  	keyBytes, err := ioutil.ReadFile(keyFile)
   471  	if err != nil {
   472  		logger.Fatalf("Failed to load client TLS key file '%s' (%s)", keyFile, err)
   473  	}
   474  
   475  	var serverRootCAs [][]byte
   476  	for _, serverRoot := range conf.General.Cluster.RootCAs {
   477  		rootCACert, err := ioutil.ReadFile(serverRoot)
   478  		if err != nil {
   479  			logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)", serverRoot, err)
   480  		}
   481  		serverRootCAs = append(serverRootCAs, rootCACert)
   482  	}
   483  
   484  	cc.SecOpts = comm.SecureOptions{
   485  		TimeShift:         conf.General.Cluster.TLSHandshakeTimeShift,
   486  		RequireClientCert: true,
   487  		CipherSuites:      comm.DefaultTLSCipherSuites,
   488  		ServerRootCAs:     serverRootCAs,
   489  		Certificate:       certBytes,
   490  		Key:               keyBytes,
   491  		UseTLS:            true,
   492  	}
   493  
   494  	return cc
   495  }
   496  
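         // initializeServerConfig assembles the gRPC server configuration for the
         // client-facing listener: TLS material (if enabled), keepalive options, and
         // metrics/logging interceptors.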
   497  func initializeServerConfig(conf *localconfig.TopLevel, metricsProvider metrics.Provider) comm.ServerConfig {
   498  	// secure server config
   499  	secureOpts := comm.SecureOptions{
   500  		UseTLS:            conf.General.TLS.Enabled,
   501  		RequireClientCert: conf.General.TLS.ClientAuthRequired,
   502  	}
   503  	// check to see if TLS is enabled
   504  	if secureOpts.UseTLS {
   505  		msg := "TLS"
   506  		// load crypto material from files
   507  		serverCertificate, err := ioutil.ReadFile(conf.General.TLS.Certificate)
   508  		if err != nil {
   509  			logger.Fatalf("Failed to load server Certificate file '%s' (%s)",
   510  				conf.General.TLS.Certificate, err)
   511  		}
   512  		serverKey, err := ioutil.ReadFile(conf.General.TLS.PrivateKey)
   513  		if err != nil {
   514  			logger.Fatalf("Failed to load PrivateKey file '%s' (%s)",
   515  				conf.General.TLS.PrivateKey, err)
   516  		}
   517  		var serverRootCAs, clientRootCAs [][]byte
   518  		for _, serverRoot := range conf.General.TLS.RootCAs {
   519  			root, err := ioutil.ReadFile(serverRoot)
   520  			if err != nil {
   521  				logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)",
    522  					serverRoot, err)
   523  			}
   524  			serverRootCAs = append(serverRootCAs, root)
   525  		}
   526  		if secureOpts.RequireClientCert {
   527  			for _, clientRoot := range conf.General.TLS.ClientRootCAs {
   528  				root, err := ioutil.ReadFile(clientRoot)
   529  				if err != nil {
   530  					logger.Fatalf("Failed to load ClientRootCAs file '%s' (%s)",
    531  						clientRoot, err)
   532  				}
   533  				clientRootCAs = append(clientRootCAs, root)
   534  			}
   535  			msg = "mutual TLS"
   536  		}
   537  		secureOpts.Key = serverKey
   538  		secureOpts.Certificate = serverCertificate
   539  		secureOpts.ServerRootCAs = serverRootCAs
   540  		secureOpts.ClientRootCAs = clientRootCAs
   541  		logger.Infof("Starting orderer with %s enabled", msg)
   542  	}
   543  	kaOpts := comm.DefaultKeepaliveOptions
   544  	// keepalive settings
   545  	// ServerMinInterval must be greater than 0
   546  	if conf.General.Keepalive.ServerMinInterval > time.Duration(0) {
   547  		kaOpts.ServerMinInterval = conf.General.Keepalive.ServerMinInterval
   548  	}
   549  	kaOpts.ServerInterval = conf.General.Keepalive.ServerInterval
   550  	kaOpts.ServerTimeout = conf.General.Keepalive.ServerTimeout
   551  
   552  	commLogger := flogging.MustGetLogger("core.comm").With("server", "Orderer")
   553  
   554  	if metricsProvider == nil {
   555  		metricsProvider = &disabled.Provider{}
   556  	}
   557  
   558  	return comm.ServerConfig{
   559  		SecOpts:            secureOpts,
   560  		KaOpts:             kaOpts,
   561  		Logger:             commLogger,
   562  		ServerStatsHandler: comm.NewServerStatsHandler(metricsProvider),
   563  		ConnectionTimeout:  conf.General.ConnectionTimeout,
   564  		StreamInterceptors: []grpc.StreamServerInterceptor{
   565  			grpcmetrics.StreamServerInterceptor(grpcmetrics.NewStreamMetrics(metricsProvider)),
   566  			grpclogging.StreamServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
   567  		},
   568  		UnaryInterceptors: []grpc.UnaryServerInterceptor{
   569  			grpcmetrics.UnaryServerInterceptor(grpcmetrics.NewUnaryMetrics(metricsProvider)),
   570  			grpclogging.UnaryServerInterceptor(
   571  				flogging.MustGetLogger("comm.grpc.server").Zap(),
   572  				grpclogging.WithLeveler(grpclogging.LevelerFunc(grpcLeveler)),
   573  			),
   574  		},
   575  	}
   576  }
   577  
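         // grpcLeveler disables gRPC request logging for the cluster Step RPC and logs
         // all other methods at Info level.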
   578  func grpcLeveler(ctx context.Context, fullMethod string) zapcore.Level {
   579  	switch fullMethod {
   580  	case "/orderer.Cluster/Step":
   581  		return flogging.DisabledLevel
   582  	default:
   583  		return zapcore.InfoLevel
   584  	}
   585  }
   586  
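         // extractBootstrapBlock returns the genesis block for the configured bootstrap
         // method, or nil when the method is "none".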
   587  func extractBootstrapBlock(conf *localconfig.TopLevel) *cb.Block {
   588  	var bootstrapBlock *cb.Block
   589  
   590  	// Select the bootstrapping mechanism
   591  	switch conf.General.BootstrapMethod {
   592  	case "file": // For now, "file" is the only supported genesis method
   593  		bootstrapBlock = file.New(conf.General.BootstrapFile).GenesisBlock()
   594  	case "none": // simply honor the configuration value
   595  		return nil
   596  	default:
    597  		logger.Panic("Unknown bootstrap method:", conf.General.BootstrapMethod)
   598  	}
   599  
   600  	return bootstrapBlock
   601  }
   602  
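         // initializeBootstrapChannel creates the bootstrap channel ledger and appends
         // the genesis block to it.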
   603  func initializeBootstrapChannel(genesisBlock *cb.Block, lf blockledger.Factory) {
   604  	chainID, err := protoutil.GetChainIDFromBlock(genesisBlock)
   605  	if err != nil {
   606  		logger.Fatal("Failed to parse channel ID from genesis block:", err)
   607  	}
   608  	gl, err := lf.GetOrCreate(chainID)
   609  	if err != nil {
   610  		logger.Fatal("Failed to create the system channel:", err)
   611  	}
   612  
   613  	if err := gl.Append(genesisBlock); err != nil {
   614  		logger.Fatal("Could not write genesis block to ledger:", err)
   615  	}
   616  }
   617  
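         // isClusterType reports whether the consensus type encoded in the genesis block
         // is a cluster type (currently only etcdraft).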
   618  func isClusterType(genesisBlock *cb.Block, bccsp bccsp.BCCSP) bool {
   619  	_, exists := clusterTypes[consensusType(genesisBlock, bccsp)]
   620  	return exists
   621  }
   622  
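         // consensusType extracts the consensus type string from the orderer
         // configuration carried in the genesis block.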
   623  func consensusType(genesisBlock *cb.Block, bccsp bccsp.BCCSP) string {
   624  	if genesisBlock == nil || genesisBlock.Data == nil || len(genesisBlock.Data.Data) == 0 {
   625  		logger.Fatalf("Empty genesis block")
   626  	}
   627  	env := &cb.Envelope{}
   628  	if err := proto.Unmarshal(genesisBlock.Data.Data[0], env); err != nil {
   629  		logger.Fatalf("Failed to unmarshal the genesis block's envelope: %v", err)
   630  	}
   631  	bundle, err := channelconfig.NewBundleFromEnvelope(env, bccsp)
   632  	if err != nil {
   633  		logger.Fatalf("Failed creating bundle from the genesis block: %v", err)
   634  	}
   635  	ordConf, exists := bundle.OrdererConfig()
   636  	if !exists {
   637  		logger.Fatalf("Orderer config doesn't exist in bundle derived from genesis block")
   638  	}
   639  	return ordConf.ConsensusType()
   640  }
   641  
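         // initializeGrpcServer opens the general listen address and wraps the listener
         // in a comm.GRPCServer with the given server configuration.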
   642  func initializeGrpcServer(conf *localconfig.TopLevel, serverConfig comm.ServerConfig) *comm.GRPCServer {
   643  	lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", conf.General.ListenAddress, conf.General.ListenPort))
   644  	if err != nil {
   645  		logger.Fatal("Failed to listen:", err)
   646  	}
   647  
    648  	// Create the gRPC server; exit via logger.Fatal if creation fails
   649  	grpcServer, err := comm.NewGRPCServerFromListener(lis, serverConfig)
   650  	if err != nil {
   651  		logger.Fatal("Failed to return new GRPC server:", err)
   652  	}
   653  
   654  	return grpcServer
   655  }
   656  
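         // loadLocalMSP loads and sets up the local MSP from the configured directory,
         // panicking on any failure since the orderer cannot operate without an identity.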
   657  func loadLocalMSP(conf *localconfig.TopLevel) msp.MSP {
    658  	// MUST call GetLocalMspConfig first, so that the default BCCSP is properly
    659  	// initialized before the MSP is instantiated.
   660  	mspConfig, err := msp.GetLocalMspConfig(conf.General.LocalMSPDir, conf.General.BCCSP, conf.General.LocalMSPID)
   661  	if err != nil {
   662  		logger.Panicf("Failed to get local msp config: %v", err)
   663  	}
   664  
   665  	typ := msp.ProviderTypeToString(msp.FABRIC)
   666  	opts, found := msp.Options[typ]
   667  	if !found {
   668  		logger.Panicf("MSP option for type %s is not found", typ)
   669  	}
   670  
   671  	localmsp, err := msp.New(opts, factory.GetDefault())
   672  	if err != nil {
   673  		logger.Panicf("Failed to load local MSP: %v", err)
   674  	}
   675  
   676  	if err = localmsp.Setup(mspConfig); err != nil {
   677  		logger.Panicf("Failed to setup local msp with config: %v", err)
   678  	}
   679  
   680  	return localmsp
   681  }
   682  
   683  //go:generate counterfeiter -o mocks/health_checker.go -fake-name HealthChecker . healthChecker
   684  
    685  // healthChecker defines the contract for the health checker.
   686  type healthChecker interface {
   687  	RegisterChecker(component string, checker healthz.HealthChecker) error
   688  }
   689  
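         // initializeMultichannelRegistrar creates the multichannel.Registrar and wires
         // up the solo, kafka, and (when bootstrapping a cluster type) etcdraft
         // consenters before initializing it.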
   690  func initializeMultichannelRegistrar(
   691  	bootstrapBlock *cb.Block,
   692  	ri *replicationInitiator,
   693  	clusterDialer *cluster.PredicateDialer,
   694  	srvConf comm.ServerConfig,
   695  	srv *comm.GRPCServer,
   696  	conf *localconfig.TopLevel,
   697  	signer identity.SignerSerializer,
   698  	metricsProvider metrics.Provider,
   699  	healthChecker healthChecker,
   700  	lf blockledger.Factory,
   701  	bccsp bccsp.BCCSP,
   702  	callbacks ...channelconfig.BundleActor,
   703  ) *multichannel.Registrar {
   704  
   705  	registrar := multichannel.NewRegistrar(*conf, lf, signer, metricsProvider, bccsp, callbacks...)
   706  
   707  	consenters := map[string]consensus.Consenter{}
   708  
   709  	var icr etcdraft.InactiveChainRegistry
   710  	if conf.General.BootstrapMethod == "file" && isClusterType(bootstrapBlock, bccsp) {
   711  		etcdConsenter := initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, ri, srvConf, srv, registrar, metricsProvider, bccsp)
   712  		icr = etcdConsenter.InactiveChainRegistry
   713  	}
   714  
   715  	consenters["solo"] = solo.New()
   716  	var kafkaMetrics *kafka.Metrics
   717  	consenters["kafka"], kafkaMetrics = kafka.New(conf.Kafka, metricsProvider, healthChecker, icr, registrar.CreateChain)
    718  	// Note: we pass a nil channel here; we could pass a channel that
    719  	// closes if we wished to clean up this routine on exit.
   720  	go kafkaMetrics.PollGoMetricsUntilStop(time.Minute, nil)
   721  	registrar.Initialize(consenters)
   722  	return registrar
   723  }
   724  
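         // initializeEtcdraftConsenter registers the etcdraft consenter and starts an
         // inactiveChainReplicator that periodically replicates chains this node is not
         // yet an active member of.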
   725  func initializeEtcdraftConsenter(
   726  	consenters map[string]consensus.Consenter,
   727  	conf *localconfig.TopLevel,
   728  	lf blockledger.Factory,
   729  	clusterDialer *cluster.PredicateDialer,
   730  	bootstrapBlock *cb.Block,
   731  	ri *replicationInitiator,
   732  	srvConf comm.ServerConfig,
   733  	srv *comm.GRPCServer,
   734  	registrar *multichannel.Registrar,
   735  	metricsProvider metrics.Provider,
   736  	bccsp bccsp.BCCSP,
   737  ) *etcdraft.Consenter {
   738  	replicationRefreshInterval := conf.General.Cluster.ReplicationBackgroundRefreshInterval
   739  	if replicationRefreshInterval == 0 {
   740  		replicationRefreshInterval = defaultReplicationBackgroundRefreshInterval
   741  	}
   742  
   743  	systemChannelName, err := protoutil.GetChainIDFromBlock(bootstrapBlock)
   744  	if err != nil {
   745  		ri.logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
   746  	}
   747  	systemLedger, err := lf.GetOrCreate(systemChannelName)
   748  	if err != nil {
   749  		ri.logger.Panicf("Failed obtaining system channel (%s) ledger: %v", systemChannelName, err)
   750  	}
   751  	getConfigBlock := func() *cb.Block {
   752  		return multichannel.ConfigBlock(systemLedger)
   753  	}
   754  
   755  	exponentialSleep := exponentialDurationSeries(replicationBackgroundInitialRefreshInterval, replicationRefreshInterval)
   756  	ticker := newTicker(exponentialSleep)
   757  
   758  	icr := &inactiveChainReplicator{
   759  		logger:                            logger,
   760  		scheduleChan:                      ticker.C,
   761  		quitChan:                          make(chan struct{}),
   762  		replicator:                        ri,
   763  		chains2CreationCallbacks:          make(map[string]chainCreation),
   764  		retrieveLastSysChannelConfigBlock: getConfigBlock,
   765  		registerChain:                     ri.registerChain,
   766  	}
   767  
   768  	// Use the inactiveChainReplicator as a channel lister, since it has knowledge
   769  	// of all inactive chains.
    770  	// This is to prevent us from pulling the entire system chain when attempting to enumerate
   771  	// the channels in the system.
   772  	ri.channelLister = icr
   773  
   774  	go icr.run()
   775  	raftConsenter := etcdraft.New(clusterDialer, conf, srvConf, srv, registrar, icr, metricsProvider, bccsp)
   776  	consenters["etcdraft"] = raftConsenter
   777  	return raftConsenter
   778  }
   779  
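         // newOperationsSystem builds the operations subsystem (metrics, health checks,
         // and the operations HTTP endpoint) from the orderer's Operations and Metrics
         // configuration.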
   780  func newOperationsSystem(ops localconfig.Operations, metrics localconfig.Metrics) *operations.System {
   781  	return operations.NewSystem(operations.Options{
   782  		Logger:        flogging.MustGetLogger("orderer.operations"),
   783  		ListenAddress: ops.ListenAddress,
   784  		Metrics: operations.MetricsOptions{
   785  			Provider: metrics.Provider,
   786  			Statsd: &operations.Statsd{
   787  				Network:       metrics.Statsd.Network,
   788  				Address:       metrics.Statsd.Address,
   789  				WriteInterval: metrics.Statsd.WriteInterval,
   790  				Prefix:        metrics.Statsd.Prefix,
   791  			},
   792  		},
   793  		TLS: operations.TLS{
   794  			Enabled:            ops.TLS.Enabled,
   795  			CertFile:           ops.TLS.Certificate,
   796  			KeyFile:            ops.TLS.PrivateKey,
   797  			ClientCertRequired: ops.TLS.ClientAuthRequired,
   798  			ClientCACertFiles:  ops.TLS.ClientRootCAs,
   799  		},
   800  		Version: metadata.Version,
   801  	})
   802  }
   803  
    804  // caManager manages certificate authorities scoped by channel.
   805  type caManager struct {
   806  	sync.Mutex
   807  	appRootCAsByChain     map[string][][]byte
   808  	ordererRootCAsByChain map[string][][]byte
   809  	clientRootCAs         [][]byte
   810  }
   811  
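         // updateTrustedRoots gathers the TLS root and intermediate certificates of the
         // application, orderer, and consortium org MSPs in the channel config, records
         // them per channel, and updates the client root CAs of the given gRPC servers
         // with the aggregated set plus any statically configured client roots.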
   812  func (mgr *caManager) updateTrustedRoots(
   813  	cm channelconfig.Resources,
   814  	servers ...*comm.GRPCServer,
   815  ) {
   816  	mgr.Lock()
   817  	defer mgr.Unlock()
   818  
   819  	appRootCAs := [][]byte{}
   820  	ordererRootCAs := [][]byte{}
   821  	appOrgMSPs := make(map[string]struct{})
   822  	ordOrgMSPs := make(map[string]struct{})
   823  
   824  	if ac, ok := cm.ApplicationConfig(); ok {
    825  		// loop through app orgs and build map of MSPIDs
   826  		for _, appOrg := range ac.Organizations() {
   827  			appOrgMSPs[appOrg.MSPID()] = struct{}{}
   828  		}
   829  	}
   830  
   831  	if ac, ok := cm.OrdererConfig(); ok {
    832  		// loop through orderer orgs and build map of MSPIDs
   833  		for _, ordOrg := range ac.Organizations() {
   834  			ordOrgMSPs[ordOrg.MSPID()] = struct{}{}
   835  		}
   836  	}
   837  
   838  	if cc, ok := cm.ConsortiumsConfig(); ok {
   839  		for _, consortium := range cc.Consortiums() {
    840  			// loop through consortium orgs and build map of MSPIDs
   841  			for _, consortiumOrg := range consortium.Organizations() {
   842  				appOrgMSPs[consortiumOrg.MSPID()] = struct{}{}
   843  			}
   844  		}
   845  	}
   846  
   847  	cid := cm.ConfigtxValidator().ChannelID()
   848  	logger.Debugf("updating root CAs for channel [%s]", cid)
   849  	msps, err := cm.MSPManager().GetMSPs()
   850  	if err != nil {
   851  		logger.Errorf("Error getting root CAs for channel %s (%s)", cid, err)
   852  		return
   853  	}
   854  	for k, v := range msps {
   855  		// check to see if this is a FABRIC MSP
   856  		if v.GetType() == msp.FABRIC {
   857  			for _, root := range v.GetTLSRootCerts() {
    858  				// check to see if this is an app org MSP
   859  				if _, ok := appOrgMSPs[k]; ok {
   860  					logger.Debugf("adding app root CAs for MSP [%s]", k)
   861  					appRootCAs = append(appRootCAs, root)
   862  				}
    863  				// check to see if this is an orderer org MSP
   864  				if _, ok := ordOrgMSPs[k]; ok {
   865  					logger.Debugf("adding orderer root CAs for MSP [%s]", k)
   866  					ordererRootCAs = append(ordererRootCAs, root)
   867  				}
   868  			}
   869  			for _, intermediate := range v.GetTLSIntermediateCerts() {
    870  				// check to see if this is an app org MSP
   871  				if _, ok := appOrgMSPs[k]; ok {
   872  					logger.Debugf("adding app root CAs for MSP [%s]", k)
   873  					appRootCAs = append(appRootCAs, intermediate)
   874  				}
    875  				// check to see if this is an orderer org MSP
   876  				if _, ok := ordOrgMSPs[k]; ok {
   877  					logger.Debugf("adding orderer root CAs for MSP [%s]", k)
   878  					ordererRootCAs = append(ordererRootCAs, intermediate)
   879  				}
   880  			}
   881  		}
   882  	}
   883  	mgr.appRootCAsByChain[cid] = appRootCAs
   884  	mgr.ordererRootCAsByChain[cid] = ordererRootCAs
   885  
   886  	// now iterate over all roots for all app and orderer chains
   887  	trustedRoots := [][]byte{}
   888  	for _, roots := range mgr.appRootCAsByChain {
   889  		trustedRoots = append(trustedRoots, roots...)
   890  	}
   891  	for _, roots := range mgr.ordererRootCAsByChain {
   892  		trustedRoots = append(trustedRoots, roots...)
   893  	}
   894  	// also need to append statically configured root certs
   895  	if len(mgr.clientRootCAs) > 0 {
   896  		trustedRoots = append(trustedRoots, mgr.clientRootCAs...)
   897  	}
   898  
   899  	// now update the client roots for the gRPC server
   900  	for _, srv := range servers {
   901  		err = srv.SetClientRootCAs(trustedRoots)
   902  		if err != nil {
   903  			msg := "Failed to update trusted roots for orderer from latest config " +
   904  				"block.  This orderer may not be able to communicate " +
   905  				"with members of channel %s (%s)"
   906  			logger.Warningf(msg, cm.ConfigtxValidator().ChannelID(), err)
   907  		}
   908  	}
   909  }
   910  
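         // updateClusterDialer refreshes the cluster dialer's root CA pool with the
         // orderer root CAs of every known chain plus the locally configured cluster
         // root CAs.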
   911  func (mgr *caManager) updateClusterDialer(
   912  	clusterDialer *cluster.PredicateDialer,
   913  	localClusterRootCAs [][]byte,
   914  ) {
   915  	mgr.Lock()
   916  	defer mgr.Unlock()
   917  
   918  	// Iterate over all orderer root CAs for all chains and add them
   919  	// to the root CAs
   920  	var clusterRootCAs [][]byte
   921  	for _, roots := range mgr.ordererRootCAsByChain {
   922  		clusterRootCAs = append(clusterRootCAs, roots...)
   923  	}
   924  
   925  	// Add the local root CAs too
   926  	clusterRootCAs = append(clusterRootCAs, localClusterRootCAs...)
   927  	// Update the cluster config with the new root CAs
   928  	clusterDialer.UpdateRootCAs(clusterRootCAs)
   929  }
   930  
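         // prettyPrintStruct logs the flattened orderer configuration, one key/value
         // pair per line.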
   931  func prettyPrintStruct(i interface{}) {
   932  	params := localconfig.Flatten(i)
   933  	var buffer bytes.Buffer
   934  	for i := range params {
   935  		buffer.WriteString("\n\t")
   936  		buffer.WriteString(params[i])
   937  	}
   938  	logger.Infof("Orderer config values:%s\n", buffer.String())
   939  }