github.com/yous1230/fabric@v2.0.0-beta.0.20191224111736-74345bee6ac2+incompatible/internal/peer/node/start.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package node
     8  
     9  import (
    10  	"context"
    11  	"fmt"
    12  	"io"
    13  	"io/ioutil"
    14  	"net"
    15  	"net/http"
    16  	"os"
    17  	"os/signal"
    18  	"path/filepath"
    19  	"sync"
    20  	"syscall"
    21  	"time"
    22  
    23  	docker "github.com/fsouza/go-dockerclient"
    24  	"github.com/golang/protobuf/proto"
    25  	"github.com/hyperledger/fabric-protos-go/common"
    26  	cb "github.com/hyperledger/fabric-protos-go/common"
    27  	discprotos "github.com/hyperledger/fabric-protos-go/discovery"
    28  	pb "github.com/hyperledger/fabric-protos-go/peer"
    29  	"github.com/hyperledger/fabric/bccsp/factory"
    30  	"github.com/hyperledger/fabric/common/cauthdsl"
    31  	ccdef "github.com/hyperledger/fabric/common/chaincode"
    32  	"github.com/hyperledger/fabric/common/crypto"
    33  	"github.com/hyperledger/fabric/common/crypto/tlsgen"
    34  	"github.com/hyperledger/fabric/common/deliver"
    35  	"github.com/hyperledger/fabric/common/flogging"
    36  	floggingmetrics "github.com/hyperledger/fabric/common/flogging/metrics"
    37  	"github.com/hyperledger/fabric/common/grpclogging"
    38  	"github.com/hyperledger/fabric/common/grpcmetrics"
    39  	"github.com/hyperledger/fabric/common/metadata"
    40  	"github.com/hyperledger/fabric/common/metrics"
    41  	"github.com/hyperledger/fabric/common/policies"
    42  	"github.com/hyperledger/fabric/core/aclmgmt"
    43  	"github.com/hyperledger/fabric/core/cclifecycle"
    44  	"github.com/hyperledger/fabric/core/chaincode"
    45  	"github.com/hyperledger/fabric/core/chaincode/accesscontrol"
    46  	"github.com/hyperledger/fabric/core/chaincode/extcc"
    47  	"github.com/hyperledger/fabric/core/chaincode/lifecycle"
    48  	"github.com/hyperledger/fabric/core/chaincode/persistence"
    49  	"github.com/hyperledger/fabric/core/chaincode/platforms"
    50  	"github.com/hyperledger/fabric/core/comm"
    51  	"github.com/hyperledger/fabric/core/committer/txvalidator/plugin"
    52  	"github.com/hyperledger/fabric/core/common/ccprovider"
    53  	"github.com/hyperledger/fabric/core/common/privdata"
    54  	coreconfig "github.com/hyperledger/fabric/core/config"
    55  	"github.com/hyperledger/fabric/core/container"
    56  	"github.com/hyperledger/fabric/core/container/dockercontroller"
    57  	"github.com/hyperledger/fabric/core/container/externalbuilder"
    58  	"github.com/hyperledger/fabric/core/deliverservice"
    59  	"github.com/hyperledger/fabric/core/dispatcher"
    60  	"github.com/hyperledger/fabric/core/endorser"
    61  	authHandler "github.com/hyperledger/fabric/core/handlers/auth"
    62  	endorsement2 "github.com/hyperledger/fabric/core/handlers/endorsement/api"
    63  	endorsement3 "github.com/hyperledger/fabric/core/handlers/endorsement/api/identities"
    64  	"github.com/hyperledger/fabric/core/handlers/library"
    65  	validation "github.com/hyperledger/fabric/core/handlers/validation/api"
    66  	"github.com/hyperledger/fabric/core/ledger"
    67  	"github.com/hyperledger/fabric/core/ledger/cceventmgmt"
    68  	"github.com/hyperledger/fabric/core/ledger/kvledger"
    69  	"github.com/hyperledger/fabric/core/ledger/ledgermgmt"
    70  	"github.com/hyperledger/fabric/core/operations"
    71  	"github.com/hyperledger/fabric/core/peer"
    72  	"github.com/hyperledger/fabric/core/policy"
    73  	"github.com/hyperledger/fabric/core/scc"
    74  	"github.com/hyperledger/fabric/core/scc/cscc"
    75  	"github.com/hyperledger/fabric/core/scc/lscc"
    76  	"github.com/hyperledger/fabric/core/scc/qscc"
    77  	"github.com/hyperledger/fabric/core/transientstore"
    78  	"github.com/hyperledger/fabric/discovery"
    79  	"github.com/hyperledger/fabric/discovery/endorsement"
    80  	discsupport "github.com/hyperledger/fabric/discovery/support"
    81  	discacl "github.com/hyperledger/fabric/discovery/support/acl"
    82  	ccsupport "github.com/hyperledger/fabric/discovery/support/chaincode"
    83  	"github.com/hyperledger/fabric/discovery/support/config"
    84  	"github.com/hyperledger/fabric/discovery/support/gossip"
    85  	gossipcommon "github.com/hyperledger/fabric/gossip/common"
    86  	gossipgossip "github.com/hyperledger/fabric/gossip/gossip"
    87  	gossipmetrics "github.com/hyperledger/fabric/gossip/metrics"
    88  	"github.com/hyperledger/fabric/gossip/service"
    89  	gossipservice "github.com/hyperledger/fabric/gossip/service"
    90  	peergossip "github.com/hyperledger/fabric/internal/peer/gossip"
    91  	"github.com/hyperledger/fabric/internal/peer/version"
    92  	"github.com/hyperledger/fabric/msp"
    93  	"github.com/hyperledger/fabric/msp/mgmt"
    94  	"github.com/hyperledger/fabric/protoutil"
    95  	"github.com/pkg/errors"
    96  	"github.com/spf13/cobra"
    97  	"github.com/spf13/viper"
    98  	"google.golang.org/grpc"
    99  )
   100  
   101  const (
   102  	chaincodeAddrKey       = "peer.chaincodeAddress"
   103  	chaincodeListenAddrKey = "peer.chaincodeListenAddress"
   104  	defaultChaincodePort   = 7052
   105  )
   106  
   107  var chaincodeDevMode bool
   108  
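         // startCmd returns the cobra command that starts the peer node, wiring the
         // --peer-chaincodedev flag into chaincodeDevMode.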
   109  func startCmd() *cobra.Command {
   110  	// Set the flags on the node start command.
   111  	flags := nodeStartCmd.Flags()
   112  	flags.BoolVarP(&chaincodeDevMode, "peer-chaincodedev", "", false, "start peer in chaincode development mode")
   113  	return nodeStartCmd
   114  }
   115  
   116  var nodeStartCmd = &cobra.Command{
   117  	Use:   "start",
   118  	Short: "Starts the node.",
   119  	Long:  `Starts a node that interacts with the network.`,
   120  	RunE: func(cmd *cobra.Command, args []string) error {
   121  		if len(args) != 0 {
   122  			return fmt.Errorf("trailing args detected")
   123  		}
   124  		// Parsing of the command line is done so silence cmd usage
   125  		cmd.SilenceUsage = true
   126  		return serve(args)
   127  	},
   128  }
   129  
    130  // externalVMAdapter coerces the result of Build to the
    131  // container.Instance type expected by the VM interface.
   132  type externalVMAdapter struct {
   133  	detector *externalbuilder.Detector
   134  }
   135  
   136  func (e externalVMAdapter) Build(
   137  	ccid string,
   138  	mdBytes []byte,
   139  	codePackage io.Reader,
   140  ) (container.Instance, error) {
   141  	i, err := e.detector.Build(ccid, mdBytes, codePackage)
   142  	if err != nil {
   143  		return nil, err
   144  	}
   145  
   146  	// ensure <nil> is returned instead of (*externalbuilder.Instance)(nil)
   147  	if i == nil {
   148  		return nil, nil
   149  	}
    150  	return i, nil
   151  }
   152  
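         // endorserChannelAdapter adapts a *peer.Peer so the endorser can look up a
         // channel's identity deserializer (its MSP manager) by channel ID.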
   153  type endorserChannelAdapter struct {
   154  	peer *peer.Peer
   155  }
   156  
   157  func (e endorserChannelAdapter) Channel(channelID string) *endorser.Channel {
   158  	if peerChannel := e.peer.Channel(channelID); peerChannel != nil {
   159  		return &endorser.Channel{
   160  			IdentityDeserializer: peerChannel.MSPManager(),
   161  		}
   162  	}
   163  
   164  	return nil
   165  }
   166  
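         // custodianLauncherAdapter binds a chaincode launcher to a fixed stream
         // handler so the chaincode custodian can launch chaincodes by ID alone.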
   167  type custodianLauncherAdapter struct {
   168  	launcher      chaincode.Launcher
   169  	streamHandler extcc.StreamHandler
   170  }
   171  
   172  func (e custodianLauncherAdapter) Launch(ccid string) error {
   173  	return e.launcher.Launch(ccid, e.streamHandler)
   174  }
   175  
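         // serve wires up and starts the peer's subsystems: operations and metrics,
         // gossip, the ledger manager, chaincode support (Docker and/or external
         // builders), the system chaincodes, the endorser, and (optionally) the
         // discovery service. It then blocks until the peer gRPC server exits or a
         // termination signal is received.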
   176  func serve(args []string) error {
   177  	// currently the peer only works with the standard MSP
   178  	// because in certain scenarios the MSP has to make sure
   179  	// that from a single credential you only have a single 'identity'.
   180  	// Idemix does not support this *YET* but it can be easily
   181  	// fixed to support it. For now, we just make sure that
   182  	// the peer only comes up with the standard MSP
   183  	mspType := mgmt.GetLocalMSP(factory.GetDefault()).GetType()
   184  	if mspType != msp.FABRIC {
   185  		panic("Unsupported msp type " + msp.ProviderTypeToString(mspType))
   186  	}
   187  
   188  	// Trace RPCs with the golang.org/x/net/trace package. This was moved out of
    189  	// the deliver service connection factory as it has process-wide implications
   190  	// and was racy with respect to initialization of gRPC clients and servers.
   191  	grpc.EnableTracing = true
   192  
   193  	logger.Infof("Starting %s", version.GetInfo())
   194  
    195  	// obtain core configuration
   196  	coreConfig, err := peer.GlobalConfig()
   197  	if err != nil {
   198  		return err
   199  	}
   200  
   201  	platformRegistry := platforms.NewRegistry(platforms.SupportedPlatforms...)
   202  
   203  	identityDeserializerFactory := func(chainID string) msp.IdentityDeserializer {
   204  		return mgmt.GetManagerForChain(chainID)
   205  	}
   206  
   207  	opsSystem := newOperationsSystem(coreConfig)
   208  	err = opsSystem.Start()
   209  	if err != nil {
   210  		return errors.WithMessage(err, "failed to initialize operations subsystems")
   211  	}
   212  	defer opsSystem.Stop()
   213  
   214  	metricsProvider := opsSystem.Provider
   215  	logObserver := floggingmetrics.NewObserver(metricsProvider)
   216  	flogging.SetObserver(logObserver)
   217  
   218  	membershipInfoProvider := privdata.NewMembershipInfoProvider(createSelfSignedData(), identityDeserializerFactory)
   219  
   220  	mspID := coreConfig.LocalMSPID
   221  
   222  	chaincodeInstallPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "lifecycle", "chaincodes")
   223  	ccStore := persistence.NewStore(chaincodeInstallPath)
   224  	ccPackageParser := &persistence.ChaincodePackageParser{
   225  		MetadataProvider: ccprovider.PersistenceAdapter(ccprovider.MetadataAsTarEntries),
   226  	}
   227  
   228  	peerHost, _, err := net.SplitHostPort(coreConfig.PeerAddress)
   229  	if err != nil {
   230  		return fmt.Errorf("peer address is not in the format of host:port: %v", err)
   231  	}
   232  
   233  	listenAddr := coreConfig.ListenAddress
   234  	serverConfig, err := peer.GetServerConfig()
   235  	if err != nil {
   236  		logger.Fatalf("Error loading secure config for peer (%s)", err)
   237  	}
   238  
   239  	serverConfig.Logger = flogging.MustGetLogger("core.comm").With("server", "PeerServer")
   240  	serverConfig.ServerStatsHandler = comm.NewServerStatsHandler(metricsProvider)
   241  	serverConfig.UnaryInterceptors = append(
   242  		serverConfig.UnaryInterceptors,
   243  		grpcmetrics.UnaryServerInterceptor(grpcmetrics.NewUnaryMetrics(metricsProvider)),
   244  		grpclogging.UnaryServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
   245  	)
   246  	serverConfig.StreamInterceptors = append(
   247  		serverConfig.StreamInterceptors,
   248  		grpcmetrics.StreamServerInterceptor(grpcmetrics.NewStreamMetrics(metricsProvider)),
   249  		grpclogging.StreamServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
   250  	)
   251  
   252  	cs := comm.NewCredentialSupport()
   253  	if serverConfig.SecOpts.UseTLS {
   254  		logger.Info("Starting peer with TLS enabled")
   255  		cs = comm.NewCredentialSupport(serverConfig.SecOpts.ServerRootCAs...)
   256  
   257  		// set the cert to use if client auth is requested by remote endpoints
   258  		clientCert, err := peer.GetClientCertificate()
   259  		if err != nil {
   260  			logger.Fatalf("Failed to set TLS client certificate (%s)", err)
   261  		}
   262  		cs.SetClientCertificate(clientCert)
   263  	}
   264  
   265  	peerServer, err := comm.NewGRPCServer(listenAddr, serverConfig)
   266  	if err != nil {
   267  		logger.Fatalf("Failed to create peer server (%s)", err)
   268  	}
   269  
   270  	transientStoreProvider, err := transientstore.NewStoreProvider(
   271  		filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "transientstore"),
   272  	)
   273  	if err != nil {
   274  		return errors.WithMessage(err, "failed to open transient store")
   275  	}
   276  
   277  	deliverServiceConfig := deliverservice.GlobalConfig()
   278  
   279  	peerInstance := &peer.Peer{
   280  		Server:                   peerServer,
   281  		ServerConfig:             serverConfig,
   282  		CredentialSupport:        cs,
   283  		StoreProvider:            transientStoreProvider,
   284  		CryptoProvider:           factory.GetDefault(),
   285  		OrdererEndpointOverrides: deliverServiceConfig.OrdererEndpointOverrides,
   286  	}
   287  
   288  	localMSP := mgmt.GetLocalMSP(factory.GetDefault())
   289  	signingIdentity, err := localMSP.GetDefaultSigningIdentity()
   290  	if err != nil {
   291  		logger.Panicf("Could not get the default signing identity from the local MSP: [%+v]", err)
   292  	}
   293  
   294  	signingIdentityBytes, err := signingIdentity.Serialize()
   295  	if err != nil {
   296  		logger.Panicf("Failed to serialize the signing identity: %v", err)
   297  	}
   298  
   299  	expirationLogger := flogging.MustGetLogger("certmonitor")
   300  	crypto.TrackExpiration(
   301  		serverConfig.SecOpts.UseTLS,
   302  		serverConfig.SecOpts.Certificate,
   303  		cs.GetClientCertificate().Certificate,
   304  		signingIdentityBytes,
   305  		expirationLogger.Warnf, // This can be used to piggyback a metric event in the future
   306  		time.Now(),
   307  		time.AfterFunc)
   308  
   309  	policyMgr := policies.PolicyManagerGetterFunc(peerInstance.GetPolicyManager)
   310  
   311  	deliverGRPCClient, err := comm.NewGRPCClient(comm.ClientConfig{
   312  		Timeout: deliverServiceConfig.ConnectionTimeout,
   313  		KaOpts:  deliverServiceConfig.KeepaliveOptions,
   314  		SecOpts: deliverServiceConfig.SecOpts,
   315  	})
   316  	if err != nil {
   317  		logger.Panicf("Could not create the deliver grpc client: [%+v]", err)
   318  	}
   319  
    320  	// FIXME: Creating the gossip service has the side effect of starting a bunch
    321  	// of goroutines and registering with the gRPC server.
   322  	gossipService, err := initGossipService(
   323  		policyMgr,
   324  		metricsProvider,
   325  		peerServer,
   326  		signingIdentity,
   327  		cs,
   328  		coreConfig.PeerAddress,
   329  		deliverGRPCClient,
   330  		deliverServiceConfig,
   331  	)
   332  	if err != nil {
   333  		return errors.WithMessage(err, "failed to initialize gossip service")
   334  	}
   335  	defer gossipService.Stop()
   336  
   337  	peerInstance.GossipService = gossipService
   338  
   339  	policyChecker := policy.NewPolicyChecker(
   340  		policies.PolicyManagerGetterFunc(peerInstance.GetPolicyManager),
   341  		mgmt.GetLocalMSP(factory.GetDefault()),
   342  		mgmt.NewLocalMSPPrincipalGetter(factory.GetDefault()),
   343  	)
   344  
    345  	// start up aclmgmt with the default ACL providers (resource-based and default 1.0 policy-based).
    346  	// Users can pass in their own ACLProvider to RegisterACLProvider (currently unit tests do this).
   347  	aclProvider := aclmgmt.NewACLProvider(
   348  		aclmgmt.ResourceGetter(peerInstance.GetStableChannelConfig),
   349  		policyChecker,
   350  	)
   351  
    352  	// TODO: unfortunately, the lifecycle initialization is very unclean at the
   353  	// moment. This is because ccprovider.SetChaincodePath only works after
   354  	// ledgermgmt.Initialize, but ledgermgmt.Initialize requires a reference to
   355  	// lifecycle.  Finally, lscc requires a reference to the system chaincode
   356  	// provider in order to be created, which requires chaincode support to be
   357  	// up, which also requires, you guessed it, lifecycle. Once we remove the
   358  	// v1.0 lifecycle, we should be good to collapse all of the init of lifecycle
   359  	// to this point.
   360  	lifecycleResources := &lifecycle.Resources{
   361  		Serializer:          &lifecycle.Serializer{},
   362  		ChannelConfigSource: peerInstance,
   363  		ChaincodeStore:      ccStore,
   364  		PackageParser:       ccPackageParser,
   365  	}
   366  
   367  	lifecycleValidatorCommitter := &lifecycle.ValidatorCommitter{
   368  		Resources:                    lifecycleResources,
   369  		LegacyDeployedCCInfoProvider: &lscc.DeployedCCInfoProvider{},
   370  	}
   371  
   372  	ccInfoFSImpl := &ccprovider.CCInfoFSImpl{GetHasher: factory.GetDefault()}
   373  
   374  	// legacyMetadataManager collects metadata information from the legacy
   375  	// lifecycle (lscc). This is expected to disappear with FAB-15061.
   376  	legacyMetadataManager, err := cclifecycle.NewMetadataManager(
   377  		cclifecycle.EnumerateFunc(
   378  			func() ([]ccdef.InstalledChaincode, error) {
   379  				return ccInfoFSImpl.ListInstalledChaincodes(ccInfoFSImpl.GetChaincodeInstallPath(), ioutil.ReadDir, ccprovider.LoadPackage)
   380  			},
   381  		),
   382  	)
   383  	if err != nil {
    384  		logger.Panicf("Failed creating LegacyMetadataManager: %+v", err)
   385  	}
   386  
   387  	// metadataManager aggregates metadata information from _lifecycle and
   388  	// the legacy lifecycle (lscc).
   389  	metadataManager := lifecycle.NewMetadataManager()
   390  
    391  	// the purpose of these two managers is to feed per-channel chaincode data
    392  	// into gossip. Because we are transitioning from lscc to _lifecycle, we
    393  	// still have two providers of such information until v2.1, at which point
    394  	// the legacy provider will be removed.
   395  	//
   396  	// the flow of information is the following
   397  	//
   398  	// gossip <-- metadataManager <-- lifecycleCache  (for _lifecycle)
   399  	//                             \
   400  	//                              - legacyMetadataManager (for lscc)
   401  	//
   402  	// FAB-15061 tracks the work necessary to remove LSCC, at which point we
   403  	// will be able to simplify the flow to simply be
   404  	//
   405  	// gossip <-- lifecycleCache
   406  
   407  	chaincodeCustodian := lifecycle.NewChaincodeCustodian()
   408  
   409  	externalBuilderOutput := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "externalbuilder", "builds")
   410  	if err := os.MkdirAll(externalBuilderOutput, 0700); err != nil {
   411  		logger.Panicf("could not create externalbuilder build output dir: %s", err)
   412  	}
   413  
   414  	ebMetadataProvider := &externalbuilder.MetadataProvider{
   415  		DurablePath: externalBuilderOutput,
   416  	}
   417  
   418  	lifecycleCache := lifecycle.NewCache(lifecycleResources, mspID, metadataManager, chaincodeCustodian, ebMetadataProvider)
   419  
   420  	txProcessors := map[common.HeaderType]ledger.CustomTxProcessor{
   421  		common.HeaderType_CONFIG: &peer.ConfigTxProcessor{},
   422  	}
   423  
   424  	peerInstance.LedgerMgr = ledgermgmt.NewLedgerMgr(
   425  		&ledgermgmt.Initializer{
   426  			CustomTxProcessors:              txProcessors,
   427  			DeployedChaincodeInfoProvider:   lifecycleValidatorCommitter,
   428  			MembershipInfoProvider:          membershipInfoProvider,
   429  			ChaincodeLifecycleEventProvider: lifecycleCache,
   430  			MetricsProvider:                 metricsProvider,
   431  			HealthCheckRegistry:             opsSystem,
   432  			StateListeners:                  []ledger.StateListener{lifecycleCache},
   433  			Config:                          ledgerConfig(),
   434  			Hasher:                          factory.GetDefault(),
   435  			EbMetadataProvider:              ebMetadataProvider,
   436  		},
   437  	)
   438  
   439  	// Configure CC package storage
   440  	lsccInstallPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "chaincodes")
   441  	ccprovider.SetChaincodesPath(lsccInstallPath)
   442  
   443  	if err := lifecycleCache.InitializeLocalChaincodes(); err != nil {
   444  		return errors.WithMessage(err, "could not initialize local chaincodes")
   445  	}
   446  
   447  	// Parameter overrides must be processed before any parameters are
   448  	// cached. Failures to cache cause the server to terminate immediately.
   449  	if chaincodeDevMode {
   450  		logger.Info("Running in chaincode development mode")
    451  		logger.Info("Disabling loading of validity system chaincode")
   452  
   453  		viper.Set("chaincode.mode", chaincode.DevModeUserRunsChaincode)
   454  	}
   455  
   456  	mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
   457  	policyCheckerProvider := func(resourceName string) deliver.PolicyCheckerFunc {
   458  		return func(env *cb.Envelope, channelID string) error {
   459  			return aclProvider.CheckACL(resourceName, channelID, env)
   460  		}
   461  	}
   462  
   463  	metrics := deliver.NewMetrics(metricsProvider)
   464  	abServer := &peer.DeliverServer{
   465  		DeliverHandler: deliver.NewHandler(
   466  			&peer.DeliverChainManager{Peer: peerInstance},
   467  			coreConfig.AuthenticationTimeWindow,
   468  			mutualTLS,
   469  			metrics,
   470  			false,
   471  		),
   472  		PolicyCheckerProvider: policyCheckerProvider,
   473  	}
   474  	pb.RegisterDeliverServer(peerServer.Server(), abServer)
   475  
   476  	// Create a self-signed CA for chaincode service
   477  	ca, err := tlsgen.NewCA()
   478  	if err != nil {
   479  		logger.Panic("Failed creating authentication layer:", err)
   480  	}
   481  	ccSrv, ccEndpoint, err := createChaincodeServer(coreConfig, ca, peerHost)
   482  	if err != nil {
   483  		logger.Panicf("Failed to create chaincode server: %s", err)
   484  	}
   485  
    486  	// get user mode
   487  	userRunsCC := chaincode.IsDevMode()
   488  	tlsEnabled := coreConfig.PeerTLSEnabled
   489  
    490  	// create the authenticator backed by the chaincode-specific TLS CA
   491  	authenticator := accesscontrol.NewAuthenticator(ca)
   492  
   493  	chaincodeHandlerRegistry := chaincode.NewHandlerRegistry(userRunsCC)
   494  	lifecycleTxQueryExecutorGetter := &chaincode.TxQueryExecutorGetter{
   495  		CCID:            scc.ChaincodeID(lifecycle.LifecycleNamespace),
   496  		HandlerRegistry: chaincodeHandlerRegistry,
   497  	}
   498  
   499  	if coreConfig.VMEndpoint == "" && len(coreConfig.ExternalBuilders) == 0 {
   500  		logger.Panic("VMEndpoint not set and no ExternalBuilders defined")
   501  	}
   502  
   503  	chaincodeConfig := chaincode.GlobalConfig()
   504  
   505  	var client *docker.Client
   506  	var dockerVM *dockercontroller.DockerVM
   507  	if coreConfig.VMEndpoint != "" {
   508  		client, err = createDockerClient(coreConfig)
   509  		if err != nil {
   510  			logger.Panicf("cannot create docker client: %s", err)
   511  		}
   512  
   513  		dockerVM = &dockercontroller.DockerVM{
   514  			PeerID:        coreConfig.PeerID,
   515  			NetworkID:     coreConfig.NetworkID,
   516  			BuildMetrics:  dockercontroller.NewBuildMetrics(opsSystem.Provider),
   517  			Client:        client,
   518  			AttachStdOut:  coreConfig.VMDockerAttachStdout,
   519  			HostConfig:    getDockerHostConfig(),
   520  			ChaincodePull: coreConfig.ChaincodePull,
   521  			NetworkMode:   coreConfig.VMNetworkMode,
   522  			PlatformBuilder: &platforms.Builder{
   523  				Registry: platformRegistry,
   524  				Client:   client,
   525  			},
    526  			// This field is superfluous for chaincodes built with v2.0+ binaries;
    527  			// however, it is left in place for now to avoid forcing users to rebuild.
    528  			// It should be removed in the future.
   529  			LoggingEnv: []string{
   530  				"CORE_CHAINCODE_LOGGING_LEVEL=" + chaincodeConfig.LogLevel,
   531  				"CORE_CHAINCODE_LOGGING_SHIM=" + chaincodeConfig.ShimLogLevel,
   532  				"CORE_CHAINCODE_LOGGING_FORMAT=" + chaincodeConfig.LogFormat,
   533  			},
   534  		}
   535  		if err := opsSystem.RegisterChecker("docker", dockerVM); err != nil {
   536  			logger.Panicf("failed to register docker health check: %s", err)
   537  		}
   538  	}
   539  
   540  	externalVM := &externalbuilder.Detector{
   541  		Builders:    externalbuilder.CreateBuilders(coreConfig.ExternalBuilders),
   542  		DurablePath: externalBuilderOutput,
   543  	}
   544  
   545  	buildRegistry := &container.BuildRegistry{}
   546  
   547  	containerRouter := &container.Router{
   548  		DockerBuilder:   dockerVM,
   549  		ExternalBuilder: externalVMAdapter{externalVM},
   550  		PackageProvider: &persistence.FallbackPackageLocator{
   551  			ChaincodePackageLocator: &persistence.ChaincodePackageLocator{
   552  				ChaincodeDir: chaincodeInstallPath,
   553  			},
   554  			LegacyCCPackageLocator: &ccprovider.CCInfoFSImpl{GetHasher: factory.GetDefault()},
   555  		},
   556  	}
   557  
   558  	builtinSCCs := map[string]struct{}{
   559  		"lscc":       {},
   560  		"qscc":       {},
   561  		"cscc":       {},
   562  		"_lifecycle": {},
   563  	}
   564  
   565  	lsccInst := &lscc.SCC{
   566  		BuiltinSCCs: builtinSCCs,
   567  		Support: &lscc.SupportImpl{
   568  			GetMSPIDs: peerInstance.GetMSPIDs,
   569  		},
   570  		SCCProvider:        &lscc.PeerShim{Peer: peerInstance},
   571  		ACLProvider:        aclProvider,
   572  		GetMSPIDs:          peerInstance.GetMSPIDs,
   573  		PolicyChecker:      policyChecker,
   574  		BCCSP:              factory.GetDefault(),
   575  		BuildRegistry:      buildRegistry,
   576  		ChaincodeBuilder:   containerRouter,
   577  		EbMetadataProvider: ebMetadataProvider,
   578  	}
   579  
   580  	chaincodeEndorsementInfo := &lifecycle.ChaincodeEndorsementInfoSource{
   581  		LegacyImpl:  lsccInst,
   582  		Resources:   lifecycleResources,
   583  		Cache:       lifecycleCache,
   584  		BuiltinSCCs: builtinSCCs,
   585  	}
   586  
   587  	containerRuntime := &chaincode.ContainerRuntime{
   588  		BuildRegistry:   buildRegistry,
   589  		ContainerRouter: containerRouter,
   590  	}
   591  
   592  	lifecycleFunctions := &lifecycle.ExternalFunctions{
   593  		Resources:                 lifecycleResources,
   594  		InstallListener:           lifecycleCache,
   595  		InstalledChaincodesLister: lifecycleCache,
   596  		ChaincodeBuilder:          containerRouter,
   597  		BuildRegistry:             buildRegistry,
   598  	}
   599  
   600  	lifecycleSCC := &lifecycle.SCC{
   601  		Dispatcher: &dispatcher.Dispatcher{
   602  			Protobuf: &dispatcher.ProtobufImpl{},
   603  		},
   604  		DeployedCCInfoProvider: lifecycleValidatorCommitter,
   605  		QueryExecutorProvider:  lifecycleTxQueryExecutorGetter,
   606  		Functions:              lifecycleFunctions,
   607  		OrgMSPID:               mspID,
   608  		ChannelConfigSource:    peerInstance,
   609  		ACLProvider:            aclProvider,
   610  	}
   611  
   612  	chaincodeLauncher := &chaincode.RuntimeLauncher{
   613  		Metrics:           chaincode.NewLaunchMetrics(opsSystem.Provider),
   614  		Registry:          chaincodeHandlerRegistry,
   615  		Runtime:           containerRuntime,
   616  		StartupTimeout:    chaincodeConfig.StartupTimeout,
   617  		CertGenerator:     authenticator,
   618  		CACert:            ca.CertBytes(),
   619  		PeerAddress:       ccEndpoint,
   620  		ConnectionHandler: &extcc.ExternalChaincodeRuntime{},
   621  	}
   622  
   623  	// Keep TestQueries working
   624  	if !chaincodeConfig.TLSEnabled {
   625  		chaincodeLauncher.CertGenerator = nil
   626  	}
   627  
   628  	chaincodeSupport := &chaincode.ChaincodeSupport{
   629  		ACLProvider:            aclProvider,
   630  		AppConfig:              peerInstance,
   631  		DeployedCCInfoProvider: lifecycleValidatorCommitter,
   632  		ExecuteTimeout:         chaincodeConfig.ExecuteTimeout,
   633  		InstallTimeout:         chaincodeConfig.InstallTimeout,
   634  		HandlerRegistry:        chaincodeHandlerRegistry,
   635  		HandlerMetrics:         chaincode.NewHandlerMetrics(opsSystem.Provider),
   636  		Keepalive:              chaincodeConfig.Keepalive,
   637  		Launcher:               chaincodeLauncher,
   638  		Lifecycle:              chaincodeEndorsementInfo,
   639  		Peer:                   peerInstance,
   640  		Runtime:                containerRuntime,
   641  		BuiltinSCCs:            builtinSCCs,
   642  		TotalQueryLimit:        chaincodeConfig.TotalQueryLimit,
   643  		UserRunsCC:             userRunsCC,
   644  	}
   645  
   646  	custodianLauncher := custodianLauncherAdapter{
   647  		launcher:      chaincodeLauncher,
   648  		streamHandler: chaincodeSupport,
   649  	}
   650  	go chaincodeCustodian.Work(buildRegistry, containerRouter, custodianLauncher)
   651  
   652  	ccSupSrv := pb.ChaincodeSupportServer(chaincodeSupport)
   653  	if tlsEnabled {
   654  		ccSupSrv = authenticator.Wrap(ccSupSrv)
   655  	}
   656  
   657  	csccInst := cscc.New(
   658  		aclProvider,
   659  		lifecycleValidatorCommitter,
   660  		lsccInst,
   661  		lifecycleValidatorCommitter,
   662  		policyChecker,
   663  		peerInstance,
   664  		factory.GetDefault(),
   665  	)
   666  	qsccInst := scc.SelfDescribingSysCC(qscc.New(aclProvider, peerInstance))
   667  	if maxConcurrency := coreConfig.LimitsConcurrencyQSCC; maxConcurrency != 0 {
   668  		qsccInst = scc.Throttle(maxConcurrency, qsccInst)
   669  	}
   670  
   671  	pb.RegisterChaincodeSupportServer(ccSrv.Server(), ccSupSrv)
   672  
   673  	// start the chaincode specific gRPC listening service
   674  	go ccSrv.Start()
   675  
   676  	logger.Debugf("Running peer")
   677  
   678  	libConf, err := library.LoadConfig()
   679  	if err != nil {
   680  		return errors.WithMessage(err, "could not decode peer handlers configuration")
   681  	}
   682  
   683  	reg := library.InitRegistry(libConf)
   684  
   685  	authFilters := reg.Lookup(library.Auth).([]authHandler.Filter)
   686  	endorserSupport := &endorser.SupportImpl{
   687  		SignerSerializer: signingIdentity,
   688  		Peer:             peerInstance,
   689  		ChaincodeSupport: chaincodeSupport,
   690  		ACLProvider:      aclProvider,
   691  		BuiltinSCCs:      builtinSCCs,
   692  	}
   693  	endorsementPluginsByName := reg.Lookup(library.Endorsement).(map[string]endorsement2.PluginFactory)
   694  	validationPluginsByName := reg.Lookup(library.Validation).(map[string]validation.PluginFactory)
   695  	signingIdentityFetcher := (endorsement3.SigningIdentityFetcher)(endorserSupport)
   696  	channelStateRetriever := endorser.ChannelStateRetriever(endorserSupport)
   697  	pluginMapper := endorser.MapBasedPluginMapper(endorsementPluginsByName)
   698  	pluginEndorser := endorser.NewPluginEndorser(&endorser.PluginSupport{
   699  		ChannelStateRetriever:   channelStateRetriever,
   700  		TransientStoreRetriever: peerInstance,
   701  		PluginMapper:            pluginMapper,
   702  		SigningIdentityFetcher:  signingIdentityFetcher,
   703  	})
   704  	endorserSupport.PluginEndorser = pluginEndorser
   705  	channelFetcher := endorserChannelAdapter{
   706  		peer: peerInstance,
   707  	}
   708  	serverEndorser := &endorser.Endorser{
   709  		PrivateDataDistributor: gossipService,
   710  		ChannelFetcher:         channelFetcher,
   711  		LocalMSP:               localMSP,
   712  		Support:                endorserSupport,
   713  		Metrics:                endorser.NewMetrics(metricsProvider),
   714  	}
   715  
   716  	// deploy system chaincodes
   717  	for _, cc := range []scc.SelfDescribingSysCC{lsccInst, csccInst, qsccInst, lifecycleSCC} {
   718  		if enabled, ok := chaincodeConfig.SCCWhitelist[cc.Name()]; !ok || !enabled {
   719  			logger.Infof("not deploying chaincode %s as it is not enabled", cc.Name())
   720  			continue
   721  		}
   722  		scc.DeploySysCC(cc, chaincodeSupport)
   723  	}
   724  
   725  	logger.Infof("Deployed system chaincodes")
   726  
    727  	// register metadataManager as a listener for updates from the legacy
    728  	// lifecycle (lscc); metadataManager will aggregate these updates with
    729  	// the ones from the new lifecycle and deliver both.
   730  	// this is expected to disappear with FAB-15061
   731  	legacyMetadataManager.AddListener(metadataManager)
   732  
    733  	// register gossip as a listener for updates from metadataManager
   734  	metadataManager.AddListener(lifecycle.HandleMetadataUpdateFunc(func(channel string, chaincodes ccdef.MetadataSet) {
   735  		gossipService.UpdateChaincodes(chaincodes.AsChaincodes(), gossipcommon.ChannelID(channel))
   736  	}))
   737  
   738  	// this brings up all the channels
   739  	peerInstance.Initialize(
   740  		func(cid string) {
   741  			// initialize the metadata for this channel.
   742  			// This call will pre-populate chaincode information for this
   743  			// channel but it won't fire any updates to its listeners
   744  			lifecycleCache.InitializeMetadata(cid)
   745  
   746  			// initialize the legacyMetadataManager for this channel.
   747  			// This call will pre-populate chaincode information from
   748  			// the legacy lifecycle for this channel; it will also fire
   749  			// the listener, which will cascade to metadataManager
   750  			// and eventually to gossip to pre-populate data structures.
   751  			// this is expected to disappear with FAB-15061
   752  			sub, err := legacyMetadataManager.NewChannelSubscription(cid, cclifecycle.QueryCreatorFunc(func() (cclifecycle.Query, error) {
   753  				return peerInstance.GetLedger(cid).NewQueryExecutor()
   754  			}))
   755  			if err != nil {
   756  				logger.Panicf("Failed subscribing to chaincode lifecycle updates")
   757  			}
   758  
   759  			// register this channel's legacyMetadataManager (sub) to get ledger updates
   760  			// this is expected to disappear with FAB-15061
   761  			cceventmgmt.GetMgr().Register(cid, sub)
   762  		},
   763  		plugin.MapBasedMapper(validationPluginsByName),
   764  		lifecycleValidatorCommitter,
   765  		lsccInst,
   766  		lifecycleValidatorCommitter,
   767  		coreConfig.ValidatorPoolSize,
   768  	)
   769  
   770  	if coreConfig.DiscoveryEnabled {
   771  		registerDiscoveryService(
   772  			coreConfig,
   773  			peerInstance,
   774  			peerServer,
   775  			policyMgr,
   776  			lifecycle.NewMetadataProvider(
   777  				lifecycleCache,
   778  				legacyMetadataManager,
   779  				peerInstance,
   780  			),
   781  			gossipService,
   782  		)
   783  	}
   784  
   785  	logger.Infof("Starting peer with ID=[%s], network ID=[%s], address=[%s]", coreConfig.PeerID, coreConfig.NetworkID, coreConfig.PeerAddress)
   786  
    787  	// Get configuration before starting goroutines to avoid
   788  	// racing in tests
   789  	profileEnabled := coreConfig.ProfileEnabled
   790  	profileListenAddress := coreConfig.ProfileListenAddress
   791  
   792  	// Start the grpc server. Done in a goroutine so we can deploy the
   793  	// genesis block if needed.
   794  	serve := make(chan error)
   795  
   796  	// Start profiling http endpoint if enabled
   797  	if profileEnabled {
   798  		go func() {
   799  			logger.Infof("Starting profiling server with listenAddress = %s", profileListenAddress)
   800  			if profileErr := http.ListenAndServe(profileListenAddress, nil); profileErr != nil {
   801  				logger.Errorf("Error starting profiler: %s", profileErr)
   802  			}
   803  		}()
   804  	}
   805  
   806  	go handleSignals(addPlatformSignals(map[os.Signal]func(){
   807  		syscall.SIGINT:  func() { serve <- nil },
   808  		syscall.SIGTERM: func() { serve <- nil },
   809  	}))
   810  
   811  	logger.Infof("Started peer with ID=[%s], network ID=[%s], address=[%s]", coreConfig.PeerID, coreConfig.NetworkID, coreConfig.PeerAddress)
   812  
   813  	// get a list of ledger IDs and load preResetHeight files for these ledger IDs
   814  	ledgerIDs, err := peerInstance.LedgerMgr.GetLedgerIDs()
   815  	if err != nil {
   816  		return errors.WithMessage(err, "failed to get ledger IDs")
   817  	}
   818  
   819  	// check to see if the peer ledgers have been reset
   820  	rootFSPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "ledgersData")
   821  	preResetHeights, err := kvledger.LoadPreResetHeight(rootFSPath, ledgerIDs)
   822  	if err != nil {
   823  		return fmt.Errorf("error loading prereset height: %s", err)
   824  	}
   825  
   826  	for cid, height := range preResetHeights {
   827  		logger.Infof("Ledger rebuild: channel [%s]: preresetHeight: [%d]", cid, height)
   828  	}
   829  
   830  	if len(preResetHeights) > 0 {
   831  		logger.Info("Ledger rebuild: Entering loop to check if current ledger heights surpass prereset ledger heights. Endorsement request processing will be disabled.")
   832  		resetFilter := &reset{
   833  			reject: true,
   834  		}
   835  		authFilters = append(authFilters, resetFilter)
   836  		go resetLoop(resetFilter, preResetHeights, ledgerIDs, peerInstance.GetLedger, 10*time.Second)
   837  	}
   838  
   839  	// start the peer server
   840  	auth := authHandler.ChainFilters(serverEndorser, authFilters...)
   841  	// Register the Endorser server
   842  	pb.RegisterEndorserServer(peerServer.Server(), auth)
   843  
   844  	go func() {
   845  		var grpcErr error
   846  		if grpcErr = peerServer.Start(); grpcErr != nil {
   847  			grpcErr = fmt.Errorf("grpc server exited with error: %s", grpcErr)
   848  		}
   849  		serve <- grpcErr
   850  	}()
   851  
   852  	// Block until grpc server exits
   853  	return <-serve
   854  }
   855  
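         // handleSignals blocks on a signal channel and invokes the handler registered
         // for each signal it receives.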
   856  func handleSignals(handlers map[os.Signal]func()) {
   857  	var signals []os.Signal
   858  	for sig := range handlers {
   859  		signals = append(signals, sig)
   860  	}
   861  
   862  	signalChan := make(chan os.Signal, 1)
   863  	signal.Notify(signalChan, signals...)
   864  
   865  	for sig := range signalChan {
   866  		logger.Infof("Received signal: %d (%s)", sig, sig)
   867  		handlers[sig]()
   868  	}
   869  }
   870  
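         // localPolicy converts the given policy proto into a policies.Policy that is
         // evaluated against the local MSP.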
   871  func localPolicy(policyObject proto.Message) policies.Policy {
   872  	localMSP := mgmt.GetLocalMSP(factory.GetDefault())
   873  	pp := cauthdsl.NewPolicyProvider(localMSP)
   874  	policy, _, err := pp.NewPolicy(protoutil.MarshalOrPanic(policyObject))
   875  	if err != nil {
    876  		logger.Panicf("Failed creating local policy: %+v", err)
   877  	}
   878  	return policy
   879  }
   880  
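         // createSelfSignedData signs a zero-filled message with the local signing
         // identity; the resulting SignedData is used by the private data membership
         // info provider.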
   881  func createSelfSignedData() protoutil.SignedData {
   882  	sID := mgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
   883  	msg := make([]byte, 32)
   884  	sig, err := sID.Sign(msg)
   885  	if err != nil {
   886  		logger.Panicf("Failed creating self signed data because message signing failed: %v", err)
   887  	}
   888  	peerIdentity, err := sID.Serialize()
   889  	if err != nil {
   890  		logger.Panicf("Failed creating self signed data because peer identity couldn't be serialized: %v", err)
   891  	}
   892  	return protoutil.SignedData{
   893  		Data:      msg,
   894  		Signature: sig,
   895  		Identity:  peerIdentity,
   896  	}
   897  }
   898  
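         // registerDiscoveryService assembles the discovery support (ACL, gossip,
         // endorsement and config support) and registers the discovery service on the
         // peer's gRPC server.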
   899  func registerDiscoveryService(
   900  	coreConfig *peer.Config,
   901  	peerInstance *peer.Peer,
   902  	peerServer *comm.GRPCServer,
   903  	polMgr policies.ChannelPolicyManagerGetter,
   904  	metadataProvider *lifecycle.MetadataProvider,
   905  	gossipService *gossipservice.GossipService,
   906  ) {
   907  	mspID := coreConfig.LocalMSPID
   908  	localAccessPolicy := localPolicy(cauthdsl.SignedByAnyAdmin([]string{mspID}))
   909  	if coreConfig.DiscoveryOrgMembersAllowed {
   910  		localAccessPolicy = localPolicy(cauthdsl.SignedByAnyMember([]string{mspID}))
   911  	}
   912  	channelVerifier := discacl.NewChannelVerifier(policies.ChannelApplicationWriters, polMgr)
   913  	acl := discacl.NewDiscoverySupport(channelVerifier, localAccessPolicy, discacl.ChannelConfigGetterFunc(peerInstance.GetStableChannelConfig))
   914  	gSup := gossip.NewDiscoverySupport(gossipService)
   915  	ccSup := ccsupport.NewDiscoverySupport(metadataProvider)
   916  	ea := endorsement.NewEndorsementAnalyzer(gSup, ccSup, acl, metadataProvider)
   917  	confSup := config.NewDiscoverySupport(config.CurrentConfigBlockGetterFunc(func(channelID string) *common.Block {
   918  		channel := peerInstance.Channel(channelID)
   919  		if channel == nil {
   920  			return nil
   921  		}
   922  		block, err := peer.ConfigBlockFromLedger(channel.Ledger())
   923  		if err != nil {
   924  			logger.Error("failed to get config block", err)
   925  			return nil
   926  		}
   927  		return block
   928  	}))
   929  	support := discsupport.NewDiscoverySupport(acl, gSup, ea, confSup, acl)
   930  	svc := discovery.NewService(discovery.Config{
   931  		TLS:                          peerServer.TLSEnabled(),
   932  		AuthCacheEnabled:             coreConfig.DiscoveryAuthCacheEnabled,
   933  		AuthCacheMaxSize:             coreConfig.DiscoveryAuthCacheMaxSize,
   934  		AuthCachePurgeRetentionRatio: coreConfig.DiscoveryAuthCachePurgeRetentionRatio,
   935  	}, support)
   936  	logger.Info("Discovery service activated")
   937  	discprotos.RegisterDiscoveryServer(peerServer.Server(), svc)
   938  }
   939  
   940  // create a CC listener using peer.chaincodeListenAddress (and if that's not set use peer.peerAddress)
   941  func createChaincodeServer(coreConfig *peer.Config, ca tlsgen.CA, peerHostname string) (srv *comm.GRPCServer, ccEndpoint string, err error) {
    942  	// before potentially setting chaincodeListenAddress, compute the chaincode endpoint first
   943  	ccEndpoint, err = computeChaincodeEndpoint(coreConfig.ChaincodeAddress, coreConfig.ChaincodeListenAddress, peerHostname)
   944  	if err != nil {
   945  		if chaincode.IsDevMode() {
   946  			// if any error for dev mode, we use 0.0.0.0:7052
   947  			ccEndpoint = fmt.Sprintf("%s:%d", "0.0.0.0", defaultChaincodePort)
   948  			logger.Warningf("use %s as chaincode endpoint because of error in computeChaincodeEndpoint: %s", ccEndpoint, err)
   949  		} else {
    950  			// for non-dev mode, we have to return the error
   951  			logger.Errorf("Error computing chaincode endpoint: %s", err)
   952  			return nil, "", err
   953  		}
   954  	}
   955  
   956  	host, _, err := net.SplitHostPort(ccEndpoint)
   957  	if err != nil {
   958  		logger.Panic("Chaincode service host", ccEndpoint, "isn't a valid hostname:", err)
   959  	}
   960  
   961  	cclistenAddress := coreConfig.ChaincodeListenAddress
   962  	if cclistenAddress == "" {
   963  		cclistenAddress = fmt.Sprintf("%s:%d", peerHostname, defaultChaincodePort)
   964  		logger.Warningf("%s is not set, using %s", chaincodeListenAddrKey, cclistenAddress)
   965  		coreConfig.ChaincodeListenAddress = cclistenAddress
   966  	}
   967  
   968  	config, err := peer.GetServerConfig()
   969  	if err != nil {
   970  		logger.Errorf("Error getting server config: %s", err)
   971  		return nil, "", err
   972  	}
   973  
   974  	// set the logger for the server
   975  	config.Logger = flogging.MustGetLogger("core.comm").With("server", "ChaincodeServer")
   976  
   977  	// Override TLS configuration if TLS is applicable
   978  	if config.SecOpts.UseTLS {
   979  		// Create a self-signed TLS certificate with a SAN that matches the computed chaincode endpoint
   980  		certKeyPair, err := ca.NewServerCertKeyPair(host)
   981  		if err != nil {
    982  			logger.Panicf("Failed generating TLS certificate for chaincode service: %+v", err)
   983  		}
   984  		config.SecOpts = comm.SecureOptions{
   985  			UseTLS: true,
   986  			// Require chaincode shim to authenticate itself
   987  			RequireClientCert: true,
   988  			// Trust only client certificates signed by ourselves
   989  			ClientRootCAs: [][]byte{ca.CertBytes()},
   990  			// Use our own self-signed TLS certificate and key
   991  			Certificate: certKeyPair.Cert,
   992  			Key:         certKeyPair.Key,
   993  			// No point in specifying server root CAs since this TLS config is only used for
   994  			// a gRPC server and not a client
   995  			ServerRootCAs: nil,
   996  		}
   997  	}
   998  
   999  	// Chaincode keepalive options - static for now
  1000  	chaincodeKeepaliveOptions := comm.KeepaliveOptions{
  1001  		ServerInterval:    time.Duration(2) * time.Hour,    // 2 hours - gRPC default
  1002  		ServerTimeout:     time.Duration(20) * time.Second, // 20 sec - gRPC default
  1003  		ServerMinInterval: time.Duration(1) * time.Minute,  // match ClientInterval
  1004  	}
  1005  	config.KaOpts = chaincodeKeepaliveOptions
  1006  	config.HealthCheckEnabled = true
  1007  
  1008  	srv, err = comm.NewGRPCServer(cclistenAddress, config)
  1009  	if err != nil {
  1010  		logger.Errorf("Error creating GRPC server: %s", err)
  1011  		return nil, "", err
  1012  	}
  1013  
  1014  	return srv, ccEndpoint, nil
  1015  }
  1016  
   1017  // computeChaincodeEndpoint uses the chaincode address, the chaincode listen
   1018  // address (both read from viper) and the peer address to compute the chaincode endpoint.
   1019  // The following cases are possible:
   1020  // Case A: if chaincodeAddrKey is set, use it unless it is "0.0.0.0" (or "::")
   1021  // Case B: else if chaincodeListenAddressKey is set and not "0.0.0.0" (or "::"), use it
   1022  // Case C: else use the peer address if it is not "0.0.0.0" (or "::")
   1023  // Case D: else return an error
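         //
         // For example, with chaincodeAddress unset, chaincodeListenAddress
         // "0.0.0.0:7052", and peerHostname "peer0.org1.example.com", the computed
         // endpoint is "peer0.org1.example.com:7052" (Case B falling back to the peer
         // hostname); the hostname here is only illustrative.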
  1024  func computeChaincodeEndpoint(chaincodeAddress string, chaincodeListenAddress string, peerHostname string) (ccEndpoint string, err error) {
  1025  	logger.Infof("Entering computeChaincodeEndpoint with peerHostname: %s", peerHostname)
  1026  	// Case A: the chaincodeAddrKey is set
  1027  	if chaincodeAddress != "" {
  1028  		host, _, err := net.SplitHostPort(chaincodeAddress)
  1029  		if err != nil {
   1030  			logger.Errorf("Failed to split chaincodeAddress: %s", err)
  1031  			return "", err
  1032  		}
  1033  		ccIP := net.ParseIP(host)
  1034  		if ccIP != nil && ccIP.IsUnspecified() {
  1035  			logger.Errorf("ChaincodeAddress' IP cannot be %s in non-dev mode", ccIP)
  1036  			return "", errors.New("invalid endpoint for chaincode to connect")
  1037  		}
  1038  		logger.Infof("Exit with ccEndpoint: %s", chaincodeAddress)
  1039  		return chaincodeAddress, nil
  1040  	}
  1041  
  1042  	// Case B: chaincodeListenAddrKey is set
  1043  	if chaincodeListenAddress != "" {
  1044  		ccEndpoint = chaincodeListenAddress
  1045  		host, port, err := net.SplitHostPort(ccEndpoint)
  1046  		if err != nil {
   1047  			logger.Errorf("ChaincodeAddress is not set and failed to split chaincodeListenAddress: %s", err)
  1048  			return "", err
  1049  		}
  1050  
  1051  		ccListenerIP := net.ParseIP(host)
   1052  		// other values, such as multicast addresses, are ignored since the server
   1053  		// wouldn't start up with such an address anyway
  1054  		if ccListenerIP != nil && ccListenerIP.IsUnspecified() {
  1055  			// Case C: if "0.0.0.0" or "::", we have to use peer address with the listen port
  1056  			peerIP := net.ParseIP(peerHostname)
  1057  			if peerIP != nil && peerIP.IsUnspecified() {
  1058  				// Case D: all we have is "0.0.0.0" or "::" which chaincode cannot connect to
   1059  				logger.Error("ChaincodeAddress is not set while both chaincodeListenAddressIP and peerIP are 0.0.0.0")
  1060  				return "", errors.New("invalid endpoint for chaincode to connect")
  1061  			}
  1062  			ccEndpoint = fmt.Sprintf("%s:%s", peerHostname, port)
  1063  		}
  1064  		logger.Infof("Exit with ccEndpoint: %s", ccEndpoint)
  1065  		return ccEndpoint, nil
  1066  	}
  1067  
  1068  	// Case C: chaincodeListenAddrKey is not set, use peer address
  1069  	peerIP := net.ParseIP(peerHostname)
  1070  	if peerIP != nil && peerIP.IsUnspecified() {
  1071  		// Case D: all we have is "0.0.0.0" or "::" which chaincode cannot connect to
   1072  		logger.Errorf("ChaincodeAddress and chaincodeListenAddress are not set and peerIP is %s", peerIP)
  1073  		return "", errors.New("invalid endpoint for chaincode to connect")
  1074  	}
  1075  
  1076  	// use peerAddress:defaultChaincodePort
  1077  	ccEndpoint = fmt.Sprintf("%s:%d", peerHostname, defaultChaincodePort)
  1078  
  1079  	logger.Infof("Exit with ccEndpoint: %s", ccEndpoint)
  1080  	return ccEndpoint, nil
  1081  }
  1082  
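         // createDockerClient creates a Docker client for the configured VM endpoint,
         // using TLS when Docker TLS is enabled in the peer configuration.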
  1083  func createDockerClient(coreConfig *peer.Config) (*docker.Client, error) {
  1084  	if coreConfig.VMDockerTLSEnabled {
  1085  		return docker.NewTLSClient(coreConfig.VMEndpoint, coreConfig.DockerCert, coreConfig.DockerKey, coreConfig.DockerCA)
  1086  	}
  1087  	return docker.NewClient(coreConfig.VMEndpoint)
  1088  }
  1089  
  1090  // secureDialOpts is the callback function for secure dial options for gossip service
  1091  func secureDialOpts(credSupport *comm.CredentialSupport) func() []grpc.DialOption {
  1092  	return func() []grpc.DialOption {
  1093  		var dialOpts []grpc.DialOption
  1094  		// set max send/recv msg sizes
  1095  		dialOpts = append(
  1096  			dialOpts,
  1097  			grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(comm.MaxRecvMsgSize), grpc.MaxCallSendMsgSize(comm.MaxSendMsgSize)),
  1098  		)
  1099  		// set the keepalive options
  1100  		kaOpts := comm.DefaultKeepaliveOptions
  1101  		if viper.IsSet("peer.keepalive.client.interval") {
  1102  			kaOpts.ClientInterval = viper.GetDuration("peer.keepalive.client.interval")
  1103  		}
  1104  		if viper.IsSet("peer.keepalive.client.timeout") {
  1105  			kaOpts.ClientTimeout = viper.GetDuration("peer.keepalive.client.timeout")
  1106  		}
  1107  		dialOpts = append(dialOpts, comm.ClientKeepaliveOptions(kaOpts)...)
  1108  
  1109  		if viper.GetBool("peer.tls.enabled") {
  1110  			dialOpts = append(dialOpts, grpc.WithTransportCredentials(credSupport.GetPeerCredentials()))
  1111  		} else {
  1112  			dialOpts = append(dialOpts, grpc.WithInsecure())
  1113  		}
  1114  		return dialOpts
  1115  	}
  1116  }
  1117  
   1118  // initGossipService initializes the gossip service by:
   1119  // 1. Enabling TLS if configured;
   1120  // 2. Initializing the message crypto service;
   1121  // 3. Initializing the security advisor;
   1122  // 4. Initializing the gossip-related structures.
  1123  func initGossipService(
  1124  	policyMgr policies.ChannelPolicyManagerGetter,
  1125  	metricsProvider metrics.Provider,
  1126  	peerServer *comm.GRPCServer,
  1127  	signer msp.SigningIdentity,
  1128  	credSupport *comm.CredentialSupport,
  1129  	peerAddress string,
  1130  	deliverGRPCClient *comm.GRPCClient,
  1131  	deliverServiceConfig *deliverservice.DeliverServiceConfig,
  1132  ) (*gossipservice.GossipService, error) {
  1133  
  1134  	var certs *gossipcommon.TLSCertificates
  1135  	if peerServer.TLSEnabled() {
  1136  		serverCert := peerServer.ServerCertificate()
  1137  		clientCert, err := peer.GetClientCertificate()
  1138  		if err != nil {
  1139  			return nil, errors.Wrap(err, "failed obtaining client certificates")
  1140  		}
  1141  		certs = &gossipcommon.TLSCertificates{}
  1142  		certs.TLSServerCert.Store(&serverCert)
  1143  		certs.TLSClientCert.Store(&clientCert)
  1144  	}
  1145  
  1146  	messageCryptoService := peergossip.NewMCS(
  1147  		policyMgr,
  1148  		signer,
  1149  		mgmt.NewDeserializersManager(factory.GetDefault()),
  1150  		factory.GetDefault(),
  1151  	)
  1152  	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(factory.GetDefault()))
  1153  	bootstrap := viper.GetStringSlice("peer.gossip.bootstrap")
  1154  
  1155  	serviceConfig := service.GlobalConfig()
  1156  	if serviceConfig.Endpoint != "" {
  1157  		peerAddress = serviceConfig.Endpoint
  1158  	}
  1159  	gossipConfig, err := gossipgossip.GlobalConfig(peerAddress, certs, bootstrap...)
  1160  	if err != nil {
  1161  		return nil, errors.Wrap(err, "failed obtaining gossip config")
  1162  	}
  1163  
  1164  	return gossipservice.New(
  1165  		signer,
  1166  		gossipmetrics.NewGossipMetrics(metricsProvider),
  1167  		peerAddress,
  1168  		peerServer.Server(),
  1169  		messageCryptoService,
  1170  		secAdv,
  1171  		secureDialOpts(credSupport),
  1172  		credSupport,
  1173  		deliverGRPCClient,
  1174  		gossipConfig,
  1175  		serviceConfig,
  1176  		deliverServiceConfig,
  1177  	)
  1178  }
  1179  
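         // newOperationsSystem builds the operations system (operations HTTP endpoint,
         // metrics provider, statsd options, and TLS settings) from the peer config.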
  1180  func newOperationsSystem(coreConfig *peer.Config) *operations.System {
  1181  	return operations.NewSystem(operations.Options{
  1182  		Logger:        flogging.MustGetLogger("peer.operations"),
  1183  		ListenAddress: coreConfig.OperationsListenAddress,
  1184  		Metrics: operations.MetricsOptions{
  1185  			Provider: coreConfig.MetricsProvider,
  1186  			Statsd: &operations.Statsd{
  1187  				Network:       coreConfig.StatsdNetwork,
  1188  				Address:       coreConfig.StatsdAaddress,
  1189  				WriteInterval: coreConfig.StatsdWriteInterval,
  1190  				Prefix:        coreConfig.StatsdPrefix,
  1191  			},
  1192  		},
  1193  		TLS: operations.TLS{
  1194  			Enabled:            coreConfig.OperationsTLSEnabled,
  1195  			CertFile:           coreConfig.OperationsTLSCertFile,
  1196  			KeyFile:            coreConfig.OperationsTLSKeyFile,
  1197  			ClientCertRequired: coreConfig.OperationsTLSClientAuthRequired,
  1198  			ClientCACertFiles:  coreConfig.OperationsTLSClientRootCAs,
  1199  		},
  1200  		Version: metadata.Version,
  1201  	})
  1202  }
  1203  
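         // getDockerHostConfig reads the vm.docker.hostConfig.* viper keys and maps
         // them onto a docker.HostConfig used for chaincode containers.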
  1204  func getDockerHostConfig() *docker.HostConfig {
  1205  	dockerKey := func(key string) string { return "vm.docker.hostConfig." + key }
  1206  	getInt64 := func(key string) int64 { return int64(viper.GetInt(dockerKey(key))) }
  1207  
  1208  	var logConfig docker.LogConfig
  1209  	err := viper.UnmarshalKey(dockerKey("LogConfig"), &logConfig)
  1210  	if err != nil {
  1211  		logger.Panicf("unable to parse Docker LogConfig: %s", err)
  1212  	}
  1213  
  1214  	networkMode := viper.GetString(dockerKey("NetworkMode"))
  1215  	if networkMode == "" {
  1216  		networkMode = "host"
  1217  	}
  1218  
  1219  	memorySwappiness := getInt64("MemorySwappiness")
  1220  	oomKillDisable := viper.GetBool(dockerKey("OomKillDisable"))
  1221  
  1222  	return &docker.HostConfig{
  1223  		CapAdd:  viper.GetStringSlice(dockerKey("CapAdd")),
  1224  		CapDrop: viper.GetStringSlice(dockerKey("CapDrop")),
  1225  
  1226  		DNS:         viper.GetStringSlice(dockerKey("Dns")),
  1227  		DNSSearch:   viper.GetStringSlice(dockerKey("DnsSearch")),
  1228  		ExtraHosts:  viper.GetStringSlice(dockerKey("ExtraHosts")),
  1229  		NetworkMode: networkMode,
  1230  		IpcMode:     viper.GetString(dockerKey("IpcMode")),
  1231  		PidMode:     viper.GetString(dockerKey("PidMode")),
  1232  		UTSMode:     viper.GetString(dockerKey("UTSMode")),
  1233  		LogConfig:   logConfig,
  1234  
  1235  		ReadonlyRootfs:   viper.GetBool(dockerKey("ReadonlyRootfs")),
  1236  		SecurityOpt:      viper.GetStringSlice(dockerKey("SecurityOpt")),
  1237  		CgroupParent:     viper.GetString(dockerKey("CgroupParent")),
  1238  		Memory:           getInt64("Memory"),
  1239  		MemorySwap:       getInt64("MemorySwap"),
  1240  		MemorySwappiness: &memorySwappiness,
  1241  		OOMKillDisable:   &oomKillDisable,
  1242  		CPUShares:        getInt64("CpuShares"),
  1243  		CPUSet:           viper.GetString(dockerKey("Cpuset")),
  1244  		CPUSetCPUs:       viper.GetString(dockerKey("CpusetCPUs")),
  1245  		CPUSetMEMs:       viper.GetString(dockerKey("CpusetMEMs")),
  1246  		CPUQuota:         getInt64("CpuQuota"),
  1247  		CPUPeriod:        getInt64("CpuPeriod"),
  1248  		BlkioWeight:      getInt64("BlkioWeight"),
  1249  	}
  1250  }
  1251  
  1252  //go:generate counterfeiter -o mock/get_ledger.go -fake-name GetLedger . getLedger
  1253  //go:generate counterfeiter -o mock/peer_ledger.go -fake-name PeerLedger . peerLedger
  1254  
  1255  type peerLedger interface {
  1256  	ledger.PeerLedger
  1257  }
  1258  
  1259  type getLedger func(string) ledger.PeerLedger
  1260  
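         // resetLoop periodically checks whether each rebuilding ledger has reached its
         // pre-reset height; once all have, it clears the pre-reset height files and
         // re-enables endorsement by flipping the reset filter.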
  1261  func resetLoop(
  1262  	resetFilter *reset,
  1263  	preResetHeights map[string]uint64,
  1264  	ledgerIDs []string,
  1265  	pLedger getLedger,
  1266  	interval time.Duration,
  1267  ) {
  1268  	// periodically check to see if current ledger height(s) surpass prereset height(s)
  1269  	ticker := time.NewTicker(interval)
  1270  
  1271  	defer ticker.Stop()
  1272  	for {
  1273  		select {
  1274  		case <-ticker.C:
  1275  			logger.Info("Ledger rebuild: Checking if current ledger heights surpass prereset ledger heights")
  1276  			logger.Debugf("Ledger rebuild: Number of ledgers still rebuilding before check: %d", len(preResetHeights))
  1277  			for cid, height := range preResetHeights {
   1278  				var l peerLedger = pLedger(cid)
  1280  				if l == nil {
  1281  					logger.Warningf("No ledger found for channel [%s]", cid)
  1282  					continue
  1283  				}
  1284  				bcInfo, err := l.GetBlockchainInfo()
  1285  				if bcInfo != nil {
  1286  					logger.Debugf("Ledger rebuild: channel [%s]: currentHeight [%d] : preresetHeight [%d]", cid, bcInfo.GetHeight(), height)
  1287  					if bcInfo.GetHeight() >= height {
  1288  						delete(preResetHeights, cid)
  1289  					} else {
  1290  						break
  1291  					}
  1292  				} else {
  1293  					if err != nil {
  1294  						logger.Warningf("Ledger rebuild: could not retrieve info for channel [%s]: %s", cid, err.Error())
  1295  					}
  1296  				}
  1297  			}
  1298  
  1299  			logger.Debugf("Ledger rebuild: Number of ledgers still rebuilding after check: %d", len(preResetHeights))
  1300  			if len(preResetHeights) == 0 {
  1301  				logger.Infof("Ledger rebuild: Complete, all ledgers surpass prereset heights. Endorsement request processing will be enabled.")
  1302  				rootFSPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "ledgersData")
  1303  				err := kvledger.ClearPreResetHeight(rootFSPath, ledgerIDs)
  1304  				if err != nil {
   1305  					logger.Warningf("Ledger rebuild: could not clear off prereset files: error=%s", err)
  1306  				}
  1307  				resetFilter.setReject(false)
  1308  				return
  1309  			}
  1310  		}
  1311  	}
  1312  }
  1313  
   1314  // reset implements the auth.Filter interface; while reject is true, endorsement requests are refused.
  1315  type reset struct {
  1316  	sync.RWMutex
  1317  	next   pb.EndorserServer
  1318  	reject bool
  1319  }
  1320  
  1321  func (r *reset) setReject(reject bool) {
  1322  	r.Lock()
  1323  	defer r.Unlock()
  1324  	r.reject = reject
  1325  }
  1326  
   1327  // Init initializes the reset filter with the next EndorserServer
  1328  func (r *reset) Init(next pb.EndorserServer) {
  1329  	r.next = next
  1330  }
  1331  
  1332  // ProcessProposal processes a signed proposal
  1333  func (r *reset) ProcessProposal(ctx context.Context, signedProp *pb.SignedProposal) (*pb.ProposalResponse, error) {
  1334  	r.RLock()
  1335  	defer r.RUnlock()
  1336  	if r.reject {
  1337  		return nil, errors.New("endorse requests are blocked while ledgers are being rebuilt")
  1338  	}
  1339  	return r.next.ProcessProposal(ctx, signedProp)
  1340  }