github.com/lzy4123/fabric@v2.1.1+incompatible/internal/peer/node/start.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package node

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"sync"
	"syscall"
	"time"

	docker "github.com/fsouza/go-dockerclient"
	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/common"
	cb "github.com/hyperledger/fabric-protos-go/common"
	discprotos "github.com/hyperledger/fabric-protos-go/discovery"
	pb "github.com/hyperledger/fabric-protos-go/peer"
	"github.com/hyperledger/fabric/bccsp/factory"
	"github.com/hyperledger/fabric/common/cauthdsl"
	ccdef "github.com/hyperledger/fabric/common/chaincode"
	"github.com/hyperledger/fabric/common/crypto"
	"github.com/hyperledger/fabric/common/crypto/tlsgen"
	"github.com/hyperledger/fabric/common/deliver"
	"github.com/hyperledger/fabric/common/flogging"
	floggingmetrics "github.com/hyperledger/fabric/common/flogging/metrics"
	"github.com/hyperledger/fabric/common/grpclogging"
	"github.com/hyperledger/fabric/common/grpcmetrics"
	"github.com/hyperledger/fabric/common/metadata"
	"github.com/hyperledger/fabric/common/metrics"
	"github.com/hyperledger/fabric/common/policies"
	"github.com/hyperledger/fabric/common/policydsl"
	"github.com/hyperledger/fabric/core/aclmgmt"
	"github.com/hyperledger/fabric/core/cclifecycle"
	"github.com/hyperledger/fabric/core/chaincode"
	"github.com/hyperledger/fabric/core/chaincode/accesscontrol"
	"github.com/hyperledger/fabric/core/chaincode/extcc"
	"github.com/hyperledger/fabric/core/chaincode/lifecycle"
	"github.com/hyperledger/fabric/core/chaincode/persistence"
	"github.com/hyperledger/fabric/core/chaincode/platforms"
	"github.com/hyperledger/fabric/core/committer/txvalidator/plugin"
	"github.com/hyperledger/fabric/core/common/ccprovider"
	"github.com/hyperledger/fabric/core/common/privdata"
	coreconfig "github.com/hyperledger/fabric/core/config"
	"github.com/hyperledger/fabric/core/container"
	"github.com/hyperledger/fabric/core/container/dockercontroller"
	"github.com/hyperledger/fabric/core/container/externalbuilder"
	"github.com/hyperledger/fabric/core/deliverservice"
	"github.com/hyperledger/fabric/core/dispatcher"
	"github.com/hyperledger/fabric/core/endorser"
	authHandler "github.com/hyperledger/fabric/core/handlers/auth"
	endorsement2 "github.com/hyperledger/fabric/core/handlers/endorsement/api"
	endorsement3 "github.com/hyperledger/fabric/core/handlers/endorsement/api/identities"
	"github.com/hyperledger/fabric/core/handlers/library"
	validation "github.com/hyperledger/fabric/core/handlers/validation/api"
	"github.com/hyperledger/fabric/core/ledger"
	"github.com/hyperledger/fabric/core/ledger/cceventmgmt"
	"github.com/hyperledger/fabric/core/ledger/kvledger"
	"github.com/hyperledger/fabric/core/ledger/ledgermgmt"
	"github.com/hyperledger/fabric/core/operations"
	"github.com/hyperledger/fabric/core/peer"
	"github.com/hyperledger/fabric/core/policy"
	"github.com/hyperledger/fabric/core/scc"
	"github.com/hyperledger/fabric/core/scc/cscc"
	"github.com/hyperledger/fabric/core/scc/lscc"
	"github.com/hyperledger/fabric/core/scc/qscc"
	"github.com/hyperledger/fabric/core/transientstore"
	"github.com/hyperledger/fabric/discovery"
	"github.com/hyperledger/fabric/discovery/endorsement"
	discsupport "github.com/hyperledger/fabric/discovery/support"
	discacl "github.com/hyperledger/fabric/discovery/support/acl"
	ccsupport "github.com/hyperledger/fabric/discovery/support/chaincode"
	"github.com/hyperledger/fabric/discovery/support/config"
	"github.com/hyperledger/fabric/discovery/support/gossip"
	gossipcommon "github.com/hyperledger/fabric/gossip/common"
	gossipgossip "github.com/hyperledger/fabric/gossip/gossip"
	gossipmetrics "github.com/hyperledger/fabric/gossip/metrics"
	gossipprivdata "github.com/hyperledger/fabric/gossip/privdata"
	"github.com/hyperledger/fabric/gossip/service"
	gossipservice "github.com/hyperledger/fabric/gossip/service"
	peergossip "github.com/hyperledger/fabric/internal/peer/gossip"
	"github.com/hyperledger/fabric/internal/peer/version"
	"github.com/hyperledger/fabric/internal/pkg/comm"
	"github.com/hyperledger/fabric/msp"
	"github.com/hyperledger/fabric/msp/mgmt"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"google.golang.org/grpc"
)

const (
	chaincodeAddrKey       = "peer.chaincodeAddress"
	chaincodeListenAddrKey = "peer.chaincodeListenAddress"
	defaultChaincodePort   = 7052
)

var chaincodeDevMode bool

func startCmd() *cobra.Command {
	// Set the flags on the node start command.
	flags := nodeStartCmd.Flags()
	flags.BoolVarP(&chaincodeDevMode, "peer-chaincodedev", "", false, "start peer in chaincode development mode")
	return nodeStartCmd
}

var nodeStartCmd = &cobra.Command{
	Use:   "start",
	Short: "Starts the node.",
	Long:  `Starts a node that interacts with the network.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) != 0 {
			return fmt.Errorf("trailing args detected")
		}
		// Parsing of the command line is done so silence cmd usage
		cmd.SilenceUsage = true
		return serve(args)
	},
}

// externalVMAdapter coerces the result of Build to the
// container.Interface type expected by the VM interface.
type externalVMAdapter struct {
	detector *externalbuilder.Detector
}

func (e externalVMAdapter) Build(
	ccid string,
	mdBytes []byte,
	codePackage io.Reader,
) (container.Instance, error) {
	i, err := e.detector.Build(ccid, mdBytes, codePackage)
	if err != nil {
		return nil, err
	}

	// ensure <nil> is returned instead of (*externalbuilder.Instance)(nil)
	if i == nil {
		return nil, nil
	}
	return i, err
}

type disabledDockerBuilder struct{}

func (disabledDockerBuilder) Build(string, *persistence.ChaincodePackageMetadata, io.Reader) (container.Instance, error) {
	return nil, errors.New("docker build is disabled")
}

type endorserChannelAdapter struct {
	peer *peer.Peer
}

func (e endorserChannelAdapter) Channel(channelID string) *endorser.Channel {
	if peerChannel := e.peer.Channel(channelID); peerChannel != nil {
		return &endorser.Channel{
			IdentityDeserializer: peerChannel.MSPManager(),
		}
	}

	return nil
}

type custodianLauncherAdapter struct {
	launcher      chaincode.Launcher
	streamHandler extcc.StreamHandler
}

func (c custodianLauncherAdapter) Launch(ccid string) error {
	return c.launcher.Launch(ccid, c.streamHandler)
}

func (c custodianLauncherAdapter) Stop(ccid string) error {
	return c.launcher.Stop(ccid)
}

func serve(args []string) error {
	// currently the peer only works with the standard MSP
	// because in certain scenarios the MSP has to make sure
	// that from a single credential you only have a single 'identity'.
	// Idemix does not support this *YET* but it can be easily
	// fixed to support it. For now, we just make sure that
	// the peer only comes up with the standard MSP
	mspType := mgmt.GetLocalMSP(factory.GetDefault()).GetType()
	if mspType != msp.FABRIC {
		panic("Unsupported msp type " + msp.ProviderTypeToString(mspType))
	}

	// Trace RPCs with the golang.org/x/net/trace package. This was moved out of
	// the deliver service connection factory as it has process wide implications
	// and was racy with respect to initialization of gRPC clients and servers.
	grpc.EnableTracing = true

	logger.Infof("Starting %s", version.GetInfo())

	// obtain core configuration
	coreConfig, err := peer.GlobalConfig()
	if err != nil {
		return err
	}

	platformRegistry := platforms.NewRegistry(platforms.SupportedPlatforms...)

	identityDeserializerFactory := func(chainID string) msp.IdentityDeserializer {
		return mgmt.GetManagerForChain(chainID)
	}

	opsSystem := newOperationsSystem(coreConfig)
	err = opsSystem.Start()
	if err != nil {
		return errors.WithMessage(err, "failed to initialize operations subsystem")
	}
	defer opsSystem.Stop()

	metricsProvider := opsSystem.Provider
	logObserver := floggingmetrics.NewObserver(metricsProvider)
	flogging.SetObserver(logObserver)

	mspID := coreConfig.LocalMSPID

	membershipInfoProvider := privdata.NewMembershipInfoProvider(mspID, createSelfSignedData(), identityDeserializerFactory)

	chaincodeInstallPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "lifecycle", "chaincodes")
	ccStore := persistence.NewStore(chaincodeInstallPath)
	ccPackageParser := &persistence.ChaincodePackageParser{
		MetadataProvider: ccprovider.PersistenceAdapter(ccprovider.MetadataAsTarEntries),
	}

	peerHost, _, err := net.SplitHostPort(coreConfig.PeerAddress)
	if err != nil {
		return fmt.Errorf("peer address is not in the format of host:port: %v", err)
	}

	listenAddr := coreConfig.ListenAddress
	serverConfig, err := peer.GetServerConfig()
	if err != nil {
		logger.Fatalf("Error loading secure config for peer (%s)", err)
	}

	serverConfig.Logger = flogging.MustGetLogger("core.comm").With("server", "PeerServer")
	serverConfig.ServerStatsHandler = comm.NewServerStatsHandler(metricsProvider)
	serverConfig.UnaryInterceptors = append(
		serverConfig.UnaryInterceptors,
		grpcmetrics.UnaryServerInterceptor(grpcmetrics.NewUnaryMetrics(metricsProvider)),
		grpclogging.UnaryServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
	)
	serverConfig.StreamInterceptors = append(
		serverConfig.StreamInterceptors,
		grpcmetrics.StreamServerInterceptor(grpcmetrics.NewStreamMetrics(metricsProvider)),
		grpclogging.StreamServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
	)

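	// If request concurrency limits are configured for the peer, wire in gRPC
	// interceptors that cap the number of in-flight requests per service
	// (see initGrpcSemaphores and the limiter interceptors in this package).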
	semaphores := initGrpcSemaphores(coreConfig)
	if len(semaphores) != 0 {
		serverConfig.UnaryInterceptors = append(serverConfig.UnaryInterceptors, unaryGrpcLimiter(semaphores))
		serverConfig.StreamInterceptors = append(serverConfig.StreamInterceptors, streamGrpcLimiter(semaphores))
	}

	cs := comm.NewCredentialSupport()
	if serverConfig.SecOpts.UseTLS {
		logger.Info("Starting peer with TLS enabled")
		cs = comm.NewCredentialSupport(serverConfig.SecOpts.ServerRootCAs...)

		// set the cert to use if client auth is requested by remote endpoints
		clientCert, err := peer.GetClientCertificate()
		if err != nil {
			logger.Fatalf("Failed to set TLS client certificate (%s)", err)
		}
		cs.SetClientCertificate(clientCert)
	}

	transientStoreProvider, err := transientstore.NewStoreProvider(
		filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "transientstore"),
	)
	if err != nil {
		return errors.WithMessage(err, "failed to open transient store")
	}

	deliverServiceConfig := deliverservice.GlobalConfig()

	peerInstance := &peer.Peer{
		ServerConfig:             serverConfig,
		CredentialSupport:        cs,
		StoreProvider:            transientStoreProvider,
		CryptoProvider:           factory.GetDefault(),
		OrdererEndpointOverrides: deliverServiceConfig.OrdererEndpointOverrides,
	}

	localMSP := mgmt.GetLocalMSP(factory.GetDefault())
	signingIdentity, err := localMSP.GetDefaultSigningIdentity()
	if err != nil {
		logger.Panicf("Could not get the default signing identity from the local MSP: [%+v]", err)
	}

	signingIdentityBytes, err := signingIdentity.Serialize()
	if err != nil {
		logger.Panicf("Failed to serialize the signing identity: %v", err)
	}

	expirationLogger := flogging.MustGetLogger("certmonitor")
	crypto.TrackExpiration(
		serverConfig.SecOpts.UseTLS,
		serverConfig.SecOpts.Certificate,
		cs.GetClientCertificate().Certificate,
		signingIdentityBytes,
		expirationLogger.Warnf, // This can be used to piggyback a metric event in the future
		time.Now(),
		time.AfterFunc)

	policyMgr := policies.PolicyManagerGetterFunc(peerInstance.GetPolicyManager)

	deliverGRPCClient, err := comm.NewGRPCClient(comm.ClientConfig{
		Timeout: deliverServiceConfig.ConnectionTimeout,
		KaOpts:  deliverServiceConfig.KeepaliveOptions,
		SecOpts: deliverServiceConfig.SecOpts,
	})
	if err != nil {
		logger.Panicf("Could not create the deliver grpc client: [%+v]", err)
	}

	policyChecker := policy.NewPolicyChecker(
		policies.PolicyManagerGetterFunc(peerInstance.GetPolicyManager),
		mgmt.GetLocalMSP(factory.GetDefault()),
		mgmt.NewLocalMSPPrincipalGetter(factory.GetDefault()),
	)

	// startup aclmgmt with default ACL providers (resource based and default 1.0 policies based).
	// Users can pass in their own ACLProvider to RegisterACLProvider (currently unit tests do this)
	aclProvider := aclmgmt.NewACLProvider(
		aclmgmt.ResourceGetter(peerInstance.GetStableChannelConfig),
		policyChecker,
	)

	// TODO, unfortunately, the lifecycle initialization is very unclean at the
	// moment. This is because ccprovider.SetChaincodePath only works after
	// ledgermgmt.Initialize, but ledgermgmt.Initialize requires a reference to
	// lifecycle. Finally, lscc requires a reference to the system chaincode
	// provider in order to be created, which requires chaincode support to be
	// up, which also requires, you guessed it, lifecycle. Once we remove the
	// v1.0 lifecycle, we should be good to collapse all of the init of lifecycle
	// to this point.
	lifecycleResources := &lifecycle.Resources{
		Serializer:          &lifecycle.Serializer{},
		ChannelConfigSource: peerInstance,
		ChaincodeStore:      ccStore,
		PackageParser:       ccPackageParser,
	}

	privdataConfig := gossipprivdata.GlobalConfig()
	lifecycleValidatorCommitter := &lifecycle.ValidatorCommitter{
		CoreConfig:                   coreConfig,
		PrivdataConfig:               privdataConfig,
		Resources:                    lifecycleResources,
		LegacyDeployedCCInfoProvider: &lscc.DeployedCCInfoProvider{},
	}

	ccInfoFSImpl := &ccprovider.CCInfoFSImpl{GetHasher: factory.GetDefault()}

	// legacyMetadataManager collects metadata information from the legacy
	// lifecycle (lscc). This is expected to disappear with FAB-15061.
	legacyMetadataManager, err := cclifecycle.NewMetadataManager(
		cclifecycle.EnumerateFunc(
			func() ([]ccdef.InstalledChaincode, error) {
				return ccInfoFSImpl.ListInstalledChaincodes(ccInfoFSImpl.GetChaincodeInstallPath(), ioutil.ReadDir, ccprovider.LoadPackage)
			},
		),
	)
	if err != nil {
		logger.Panicf("Failed creating LegacyMetadataManager: %+v", err)
	}

	// metadataManager aggregates metadata information from _lifecycle and
	// the legacy lifecycle (lscc).
	metadataManager := lifecycle.NewMetadataManager()

	// the purpose of these two managers is to feed per-channel chaincode data
	// into gossip. Owing to the fact that we are transitioning from lscc to
	// _lifecycle, we still have two providers of such information until v2.1,
	// at which point we will remove the legacy one.
	//
	// the flow of information is the following
	//
	// gossip <-- metadataManager <-- lifecycleCache  (for _lifecycle)
	//                             \
	//                              - legacyMetadataManager (for lscc)
	//
	// FAB-15061 tracks the work necessary to remove LSCC, at which point we
	// will be able to simplify the flow to simply be
	//
	// gossip <-- lifecycleCache

	chaincodeCustodian := lifecycle.NewChaincodeCustodian()

	externalBuilderOutput := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "externalbuilder", "builds")
	if err := os.MkdirAll(externalBuilderOutput, 0700); err != nil {
		logger.Panicf("could not create externalbuilder build output dir: %s", err)
	}

	ebMetadataProvider := &externalbuilder.MetadataProvider{
		DurablePath: externalBuilderOutput,
	}

	lifecycleCache := lifecycle.NewCache(lifecycleResources, mspID, metadataManager, chaincodeCustodian, ebMetadataProvider)

	txProcessors := map[common.HeaderType]ledger.CustomTxProcessor{
		common.HeaderType_CONFIG: &peer.ConfigTxProcessor{},
	}

	peerInstance.LedgerMgr = ledgermgmt.NewLedgerMgr(
		&ledgermgmt.Initializer{
			CustomTxProcessors:              txProcessors,
			DeployedChaincodeInfoProvider:   lifecycleValidatorCommitter,
			MembershipInfoProvider:          membershipInfoProvider,
			ChaincodeLifecycleEventProvider: lifecycleCache,
			MetricsProvider:                 metricsProvider,
			HealthCheckRegistry:             opsSystem,
			StateListeners:                  []ledger.StateListener{lifecycleCache},
			Config:                          ledgerConfig(),
			Hasher:                          factory.GetDefault(),
			EbMetadataProvider:              ebMetadataProvider,
		},
	)

	peerServer, err := comm.NewGRPCServer(listenAddr, serverConfig)
	if err != nil {
		logger.Fatalf("Failed to create peer server (%s)", err)
	}

	// FIXME: Creating the gossip service has the side effect of starting a bunch
	// of go routines and registration with the grpc server.
	gossipService, err := initGossipService(
		policyMgr,
		metricsProvider,
		peerServer,
		signingIdentity,
		cs,
		coreConfig.PeerAddress,
		deliverGRPCClient,
		deliverServiceConfig,
		privdataConfig,
	)
	if err != nil {
		return errors.WithMessage(err, "failed to initialize gossip service")
	}
	defer gossipService.Stop()

	peerInstance.GossipService = gossipService

	// Configure CC package storage
	lsccInstallPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "chaincodes")
	ccprovider.SetChaincodesPath(lsccInstallPath)

	if err := lifecycleCache.InitializeLocalChaincodes(); err != nil {
		return errors.WithMessage(err, "could not initialize local chaincodes")
	}

	// Parameter overrides must be processed before any parameters are
	// cached. Failures to cache cause the server to terminate immediately.
	if chaincodeDevMode {
		logger.Info("Running in chaincode development mode")
		logger.Info("Disable loading validity system chaincode")

		viper.Set("chaincode.mode", chaincode.DevModeUserRunsChaincode)
	}

	mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
	policyCheckerProvider := func(resourceName string) deliver.PolicyCheckerFunc {
		return func(env *cb.Envelope, channelID string) error {
			return aclProvider.CheckACL(resourceName, channelID, env)
		}
	}

	metrics := deliver.NewMetrics(metricsProvider)
	abServer := &peer.DeliverServer{
		DeliverHandler: deliver.NewHandler(
			&peer.DeliverChainManager{Peer: peerInstance},
			coreConfig.AuthenticationTimeWindow,
			mutualTLS,
			metrics,
			false,
		),
		PolicyCheckerProvider: policyCheckerProvider,
	}
	pb.RegisterDeliverServer(peerServer.Server(), abServer)

	// Create a self-signed CA for chaincode service
	ca, err := tlsgen.NewCA()
	if err != nil {
		logger.Panic("Failed creating authentication layer:", err)
	}
	ccSrv, ccEndpoint, err := createChaincodeServer(coreConfig, ca, peerHost)
	if err != nil {
		logger.Panicf("Failed to create chaincode server: %s", err)
	}

	// get user mode
	userRunsCC := chaincode.IsDevMode()
	tlsEnabled := coreConfig.PeerTLSEnabled

	// create chaincode specific tls CA
	authenticator := accesscontrol.NewAuthenticator(ca)

	chaincodeHandlerRegistry := chaincode.NewHandlerRegistry(userRunsCC)
	lifecycleTxQueryExecutorGetter := &chaincode.TxQueryExecutorGetter{
		CCID:            scc.ChaincodeID(lifecycle.LifecycleNamespace),
		HandlerRegistry: chaincodeHandlerRegistry,
	}

	if coreConfig.VMEndpoint == "" && len(coreConfig.ExternalBuilders) == 0 {
		logger.Panic("VMEndpoint not set and no ExternalBuilders defined")
	}

	chaincodeConfig := chaincode.GlobalConfig()

	var dockerBuilder container.DockerBuilder
	if coreConfig.VMEndpoint != "" {
		client, err := createDockerClient(coreConfig)
		if err != nil {
			logger.Panicf("cannot create docker client: %s", err)
		}

		dockerVM := &dockercontroller.DockerVM{
			PeerID:        coreConfig.PeerID,
			NetworkID:     coreConfig.NetworkID,
			BuildMetrics:  dockercontroller.NewBuildMetrics(opsSystem.Provider),
			Client:        client,
			AttachStdOut:  coreConfig.VMDockerAttachStdout,
			HostConfig:    getDockerHostConfig(),
			ChaincodePull: coreConfig.ChaincodePull,
			NetworkMode:   coreConfig.VMNetworkMode,
			PlatformBuilder: &platforms.Builder{
				Registry: platformRegistry,
				Client:   client,
			},
			// This field is superfluous for chaincodes built with v2.0+ binaries;
			// however, to avoid forcing users to rebuild, it is left in place for
			// now and should be removed in the future.
			LoggingEnv: []string{
				"CORE_CHAINCODE_LOGGING_LEVEL=" + chaincodeConfig.LogLevel,
				"CORE_CHAINCODE_LOGGING_SHIM=" + chaincodeConfig.ShimLogLevel,
				"CORE_CHAINCODE_LOGGING_FORMAT=" + chaincodeConfig.LogFormat,
			},
			MSPID: mspID,
		}
		if err := opsSystem.RegisterChecker("docker", dockerVM); err != nil {
			logger.Panicf("failed to register docker health check: %s", err)
		}
		dockerBuilder = dockerVM
	}

	// docker is disabled when we're missing the docker config
	if dockerBuilder == nil {
		dockerBuilder = &disabledDockerBuilder{}
	}

	externalVM := &externalbuilder.Detector{
		Builders:    externalbuilder.CreateBuilders(coreConfig.ExternalBuilders, mspID),
		DurablePath: externalBuilderOutput,
	}

	buildRegistry := &container.BuildRegistry{}

	containerRouter := &container.Router{
		DockerBuilder:   dockerBuilder,
		ExternalBuilder: externalVMAdapter{externalVM},
		PackageProvider: &persistence.FallbackPackageLocator{
			ChaincodePackageLocator: &persistence.ChaincodePackageLocator{
				ChaincodeDir: chaincodeInstallPath,
			},
			LegacyCCPackageLocator: &ccprovider.CCInfoFSImpl{GetHasher: factory.GetDefault()},
		},
	}

	builtinSCCs := map[string]struct{}{
		"lscc":       {},
		"qscc":       {},
		"cscc":       {},
		"_lifecycle": {},
	}

	lsccInst := &lscc.SCC{
		BuiltinSCCs: builtinSCCs,
		Support: &lscc.SupportImpl{
			GetMSPIDs: peerInstance.GetMSPIDs,
		},
		SCCProvider:        &lscc.PeerShim{Peer: peerInstance},
		ACLProvider:        aclProvider,
		GetMSPIDs:          peerInstance.GetMSPIDs,
		PolicyChecker:      policyChecker,
		BCCSP:              factory.GetDefault(),
		BuildRegistry:      buildRegistry,
		ChaincodeBuilder:   containerRouter,
		EbMetadataProvider: ebMetadataProvider,
	}

	chaincodeEndorsementInfo := &lifecycle.ChaincodeEndorsementInfoSource{
		LegacyImpl:  lsccInst,
		Resources:   lifecycleResources,
		Cache:       lifecycleCache,
		BuiltinSCCs: builtinSCCs,
	}

	containerRuntime := &chaincode.ContainerRuntime{
		BuildRegistry:   buildRegistry,
		ContainerRouter: containerRouter,
	}

	lifecycleFunctions := &lifecycle.ExternalFunctions{
		Resources:                 lifecycleResources,
		InstallListener:           lifecycleCache,
		InstalledChaincodesLister: lifecycleCache,
		ChaincodeBuilder:          containerRouter,
		BuildRegistry:             buildRegistry,
	}

	lifecycleSCC := &lifecycle.SCC{
		Dispatcher: &dispatcher.Dispatcher{
			Protobuf: &dispatcher.ProtobufImpl{},
		},
		DeployedCCInfoProvider: lifecycleValidatorCommitter,
		QueryExecutorProvider:  lifecycleTxQueryExecutorGetter,
		Functions:              lifecycleFunctions,
		OrgMSPID:               mspID,
		ChannelConfigSource:    peerInstance,
		ACLProvider:            aclProvider,
	}

	chaincodeLauncher := &chaincode.RuntimeLauncher{
		Metrics:           chaincode.NewLaunchMetrics(opsSystem.Provider),
		Registry:          chaincodeHandlerRegistry,
		Runtime:           containerRuntime,
		StartupTimeout:    chaincodeConfig.StartupTimeout,
		CertGenerator:     authenticator,
		CACert:            ca.CertBytes(),
		PeerAddress:       ccEndpoint,
		ConnectionHandler: &extcc.ExternalChaincodeRuntime{},
	}

	// Keep TestQueries working
	if !chaincodeConfig.TLSEnabled {
		chaincodeLauncher.CertGenerator = nil
	}

	chaincodeSupport := &chaincode.ChaincodeSupport{
		ACLProvider:            aclProvider,
		AppConfig:              peerInstance,
		DeployedCCInfoProvider: lifecycleValidatorCommitter,
		ExecuteTimeout:         chaincodeConfig.ExecuteTimeout,
		InstallTimeout:         chaincodeConfig.InstallTimeout,
		HandlerRegistry:        chaincodeHandlerRegistry,
		HandlerMetrics:         chaincode.NewHandlerMetrics(opsSystem.Provider),
		Keepalive:              chaincodeConfig.Keepalive,
		Launcher:               chaincodeLauncher,
		Lifecycle:              chaincodeEndorsementInfo,
		Peer:                   peerInstance,
		Runtime:                containerRuntime,
		BuiltinSCCs:            builtinSCCs,
		TotalQueryLimit:        chaincodeConfig.TotalQueryLimit,
		UserRunsCC:             userRunsCC,
	}

	custodianLauncher := custodianLauncherAdapter{
		launcher:      chaincodeLauncher,
		streamHandler: chaincodeSupport,
	}
	go chaincodeCustodian.Work(buildRegistry, containerRouter, custodianLauncher)

	ccSupSrv := pb.ChaincodeSupportServer(chaincodeSupport)
	if tlsEnabled {
		ccSupSrv = authenticator.Wrap(ccSupSrv)
	}

	csccInst := cscc.New(
		aclProvider,
		lifecycleValidatorCommitter,
		lsccInst,
		lifecycleValidatorCommitter,
		policyChecker,
		peerInstance,
		factory.GetDefault(),
	)
	qsccInst := scc.SelfDescribingSysCC(qscc.New(aclProvider, peerInstance))

	pb.RegisterChaincodeSupportServer(ccSrv.Server(), ccSupSrv)

	// start the chaincode specific gRPC listening service
	go ccSrv.Start()

	logger.Debugf("Running peer")

	libConf, err := library.LoadConfig()
	if err != nil {
		return errors.WithMessage(err, "could not decode peer handlers configuration")
	}

	reg := library.InitRegistry(libConf)

	authFilters := reg.Lookup(library.Auth).([]authHandler.Filter)
	endorserSupport := &endorser.SupportImpl{
		SignerSerializer: signingIdentity,
		Peer:             peerInstance,
		ChaincodeSupport: chaincodeSupport,
		ACLProvider:      aclProvider,
		BuiltinSCCs:      builtinSCCs,
	}
	endorsementPluginsByName := reg.Lookup(library.Endorsement).(map[string]endorsement2.PluginFactory)
	validationPluginsByName := reg.Lookup(library.Validation).(map[string]validation.PluginFactory)
	signingIdentityFetcher := (endorsement3.SigningIdentityFetcher)(endorserSupport)
	channelStateRetriever := endorser.ChannelStateRetriever(endorserSupport)
	pluginMapper := endorser.MapBasedPluginMapper(endorsementPluginsByName)
	pluginEndorser := endorser.NewPluginEndorser(&endorser.PluginSupport{
		ChannelStateRetriever:   channelStateRetriever,
		TransientStoreRetriever: peerInstance,
		PluginMapper:            pluginMapper,
		SigningIdentityFetcher:  signingIdentityFetcher,
	})
	endorserSupport.PluginEndorser = pluginEndorser
	channelFetcher := endorserChannelAdapter{
		peer: peerInstance,
	}
	serverEndorser := &endorser.Endorser{
		PrivateDataDistributor: gossipService,
		ChannelFetcher:         channelFetcher,
		LocalMSP:               localMSP,
		Support:                endorserSupport,
		Metrics:                endorser.NewMetrics(metricsProvider),
	}

	// deploy system chaincodes
	for _, cc := range []scc.SelfDescribingSysCC{lsccInst, csccInst, qsccInst, lifecycleSCC} {
		if enabled, ok := chaincodeConfig.SCCWhitelist[cc.Name()]; !ok || !enabled {
			logger.Infof("not deploying chaincode %s as it is not enabled", cc.Name())
			continue
		}
		scc.DeploySysCC(cc, chaincodeSupport)
	}

	logger.Infof("Deployed system chaincodes")

	// register the lifecycleMetadataManager to get updates from the legacy
	// chaincode; lifecycleMetadataManager will aggregate these updates with
	// the ones from the new lifecycle and deliver both.
	// this is expected to disappear with FAB-15061
	legacyMetadataManager.AddListener(metadataManager)

	// register gossip as a listener for updates from lifecycleMetadataManager
	metadataManager.AddListener(lifecycle.HandleMetadataUpdateFunc(func(channel string, chaincodes ccdef.MetadataSet) {
		gossipService.UpdateChaincodes(chaincodes.AsChaincodes(), gossipcommon.ChannelID(channel))
	}))

	// this brings up all the channels
	peerInstance.Initialize(
		func(cid string) {
			// initialize the metadata for this channel.
			// This call will pre-populate chaincode information for this
			// channel but it won't fire any updates to its listeners
			lifecycleCache.InitializeMetadata(cid)

			// initialize the legacyMetadataManager for this channel.
			// This call will pre-populate chaincode information from
			// the legacy lifecycle for this channel; it will also fire
			// the listener, which will cascade to metadataManager
			// and eventually to gossip to pre-populate data structures.
			// this is expected to disappear with FAB-15061
			sub, err := legacyMetadataManager.NewChannelSubscription(cid, cclifecycle.QueryCreatorFunc(func() (cclifecycle.Query, error) {
				return peerInstance.GetLedger(cid).NewQueryExecutor()
			}))
			if err != nil {
				logger.Panicf("Failed subscribing to chaincode lifecycle updates")
			}

			// register this channel's legacyMetadataManager (sub) to get ledger updates
			// this is expected to disappear with FAB-15061
			cceventmgmt.GetMgr().Register(cid, sub)
		},
		peerServer,
		plugin.MapBasedMapper(validationPluginsByName),
		lifecycleValidatorCommitter,
		lsccInst,
		lifecycleValidatorCommitter,
		coreConfig.ValidatorPoolSize,
	)

	if coreConfig.DiscoveryEnabled {
		registerDiscoveryService(
			coreConfig,
			peerInstance,
			peerServer,
			policyMgr,
			lifecycle.NewMetadataProvider(
				lifecycleCache,
				legacyMetadataManager,
				peerInstance,
			),
			gossipService,
		)
	}

	logger.Infof("Starting peer with ID=[%s], network ID=[%s], address=[%s]", coreConfig.PeerID, coreConfig.NetworkID, coreConfig.PeerAddress)

	// Get configuration before starting go routines to avoid
	// racing in tests
	profileEnabled := coreConfig.ProfileEnabled
	profileListenAddress := coreConfig.ProfileListenAddress

	// Start the grpc server. Done in a goroutine so we can deploy the
	// genesis block if needed.
	serve := make(chan error)

	// Start profiling http endpoint if enabled
	if profileEnabled {
		go func() {
			logger.Infof("Starting profiling server with listenAddress = %s", profileListenAddress)
			if profileErr := http.ListenAndServe(profileListenAddress, nil); profileErr != nil {
				logger.Errorf("Error starting profiler: %s", profileErr)
			}
		}()
	}

	handleSignals(addPlatformSignals(map[os.Signal]func(){
		syscall.SIGINT:  func() { serve <- nil },
		syscall.SIGTERM: func() { serve <- nil },
	}))

	logger.Infof("Started peer with ID=[%s], network ID=[%s], address=[%s]", coreConfig.PeerID, coreConfig.NetworkID, coreConfig.PeerAddress)

	// get a list of ledger IDs and load preResetHeight files for these ledger IDs
	ledgerIDs, err := peerInstance.LedgerMgr.GetLedgerIDs()
	if err != nil {
		return errors.WithMessage(err, "failed to get ledger IDs")
	}

	// check to see if the peer ledgers have been reset
	rootFSPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "ledgersData")
	preResetHeights, err := kvledger.LoadPreResetHeight(rootFSPath, ledgerIDs)
	if err != nil {
		return fmt.Errorf("error loading prereset height: %s", err)
	}

	for cid, height := range preResetHeights {
		logger.Infof("Ledger rebuild: channel [%s]: preresetHeight: [%d]", cid, height)
	}

	if len(preResetHeights) > 0 {
		logger.Info("Ledger rebuild: Entering loop to check if current ledger heights surpass prereset ledger heights. Endorsement request processing will be disabled.")
		resetFilter := &reset{
			reject: true,
		}
		authFilters = append(authFilters, resetFilter)
		go resetLoop(resetFilter, preResetHeights, ledgerIDs, peerInstance.GetLedger, 10*time.Second)
	}

	// start the peer server
	auth := authHandler.ChainFilters(serverEndorser, authFilters...)
	// Register the Endorser server
	pb.RegisterEndorserServer(peerServer.Server(), auth)

	go func() {
		var grpcErr error
		if grpcErr = peerServer.Start(); grpcErr != nil {
			grpcErr = fmt.Errorf("grpc server exited with error: %s", grpcErr)
		}
		serve <- grpcErr
	}()

	// Block until grpc server exits
	return <-serve
}

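// handleSignals registers the provided handler functions for their signals
// and dispatches incoming OS signals to them on a background goroutine.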
func handleSignals(handlers map[os.Signal]func()) {
	var signals []os.Signal
	for sig := range handlers {
		signals = append(signals, sig)
	}

	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, signals...)

	go func() {
		for sig := range signalChan {
			logger.Infof("Received signal: %d (%s)", sig, sig)
			handlers[sig]()
		}
	}()
}

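// localPolicy compiles the supplied policy proto into a policies.Policy that
// is evaluated against the local MSP.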
func localPolicy(policyObject proto.Message) policies.Policy {
	localMSP := mgmt.GetLocalMSP(factory.GetDefault())
	pp := cauthdsl.NewPolicyProvider(localMSP)
	policy, _, err := pp.NewPolicy(protoutil.MarshalOrPanic(policyObject))
	if err != nil {
		logger.Panicf("Failed creating local policy: %+v", err)
	}
	return policy
}

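// createSelfSignedData signs a zero-filled 32-byte message with the peer's
// local signing identity and returns the result as SignedData; it is used by
// the membership info provider constructed in serve.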
func createSelfSignedData() protoutil.SignedData {
	sID := mgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
	msg := make([]byte, 32)
	sig, err := sID.Sign(msg)
	if err != nil {
		logger.Panicf("Failed creating self signed data because message signing failed: %v", err)
	}
	peerIdentity, err := sID.Serialize()
	if err != nil {
		logger.Panicf("Failed creating self signed data because peer identity couldn't be serialized: %v", err)
	}
	return protoutil.SignedData{
		Data:      msg,
		Signature: sig,
		Identity:  peerIdentity,
	}
}

func registerDiscoveryService(
	coreConfig *peer.Config,
	peerInstance *peer.Peer,
	peerServer *comm.GRPCServer,
	polMgr policies.ChannelPolicyManagerGetter,
	metadataProvider *lifecycle.MetadataProvider,
	gossipService *gossipservice.GossipService,
) {
	mspID := coreConfig.LocalMSPID
	localAccessPolicy := localPolicy(policydsl.SignedByAnyAdmin([]string{mspID}))
	if coreConfig.DiscoveryOrgMembersAllowed {
		localAccessPolicy = localPolicy(policydsl.SignedByAnyMember([]string{mspID}))
	}
	channelVerifier := discacl.NewChannelVerifier(policies.ChannelApplicationWriters, polMgr)
	acl := discacl.NewDiscoverySupport(channelVerifier, localAccessPolicy, discacl.ChannelConfigGetterFunc(peerInstance.GetStableChannelConfig))
	gSup := gossip.NewDiscoverySupport(gossipService)
	ccSup := ccsupport.NewDiscoverySupport(metadataProvider)
	ea := endorsement.NewEndorsementAnalyzer(gSup, ccSup, acl, metadataProvider)
	confSup := config.NewDiscoverySupport(config.CurrentConfigBlockGetterFunc(func(channelID string) *common.Block {
		channel := peerInstance.Channel(channelID)
		if channel == nil {
			return nil
		}
		block, err := peer.ConfigBlockFromLedger(channel.Ledger())
		if err != nil {
			logger.Error("failed to get config block", err)
			return nil
		}
		return block
	}))
	support := discsupport.NewDiscoverySupport(acl, gSup, ea, confSup, acl)
	svc := discovery.NewService(discovery.Config{
		TLS:                          peerServer.TLSEnabled(),
		AuthCacheEnabled:             coreConfig.DiscoveryAuthCacheEnabled,
		AuthCacheMaxSize:             coreConfig.DiscoveryAuthCacheMaxSize,
		AuthCachePurgeRetentionRatio: coreConfig.DiscoveryAuthCachePurgeRetentionRatio,
	}, support)
	logger.Info("Discovery service activated")
	discprotos.RegisterDiscoveryServer(peerServer.Server(), svc)
}

// create a CC listener using peer.chaincodeListenAddress (and if that's not set use peer.peerAddress)
func createChaincodeServer(coreConfig *peer.Config, ca tlsgen.CA, peerHostname string) (srv *comm.GRPCServer, ccEndpoint string, err error) {
	// before potentially setting chaincodeListenAddress, compute chaincode endpoint at first
	ccEndpoint, err = computeChaincodeEndpoint(coreConfig.ChaincodeAddress, coreConfig.ChaincodeListenAddress, peerHostname)
	if err != nil {
		if chaincode.IsDevMode() {
			// if any error for dev mode, we use 0.0.0.0:7052
			ccEndpoint = fmt.Sprintf("%s:%d", "0.0.0.0", defaultChaincodePort)
			logger.Warningf("use %s as chaincode endpoint because of error in computeChaincodeEndpoint: %s", ccEndpoint, err)
		} else {
			// for non-dev mode, we have to return error
			logger.Errorf("Error computing chaincode endpoint: %s", err)
			return nil, "", err
		}
	}

	host, _, err := net.SplitHostPort(ccEndpoint)
	if err != nil {
		logger.Panic("Chaincode service host", ccEndpoint, "isn't a valid hostname:", err)
	}

	cclistenAddress := coreConfig.ChaincodeListenAddress
	if cclistenAddress == "" {
		cclistenAddress = fmt.Sprintf("%s:%d", peerHostname, defaultChaincodePort)
		logger.Warningf("%s is not set, using %s", chaincodeListenAddrKey, cclistenAddress)
		coreConfig.ChaincodeListenAddress = cclistenAddress
	}

	config, err := peer.GetServerConfig()
	if err != nil {
		logger.Errorf("Error getting server config: %s", err)
		return nil, "", err
	}

	// set the logger for the server
	config.Logger = flogging.MustGetLogger("core.comm").With("server", "ChaincodeServer")

	// Override TLS configuration if TLS is applicable
	if config.SecOpts.UseTLS {
		// Create a self-signed TLS certificate with a SAN that matches the computed chaincode endpoint
		certKeyPair, err := ca.NewServerCertKeyPair(host)
		if err != nil {
			logger.Panicf("Failed generating TLS certificate for chaincode service: %+v", err)
		}
		config.SecOpts = comm.SecureOptions{
			UseTLS: true,
			// Require chaincode shim to authenticate itself
			RequireClientCert: true,
			// Trust only client certificates signed by ourselves
			ClientRootCAs: [][]byte{ca.CertBytes()},
			// Use our own self-signed TLS certificate and key
			Certificate: certKeyPair.Cert,
			Key:         certKeyPair.Key,
			// No point in specifying server root CAs since this TLS config is only used for
			// a gRPC server and not a client
			ServerRootCAs: nil,
		}
	}

	// Chaincode keepalive options - static for now
	chaincodeKeepaliveOptions := comm.KeepaliveOptions{
		ServerInterval:    time.Duration(2) * time.Hour,    // 2 hours - gRPC default
		ServerTimeout:     time.Duration(20) * time.Second, // 20 sec - gRPC default
		ServerMinInterval: time.Duration(1) * time.Minute,  // match ClientInterval
	}
	config.KaOpts = chaincodeKeepaliveOptions
	config.HealthCheckEnabled = true

	srv, err = comm.NewGRPCServer(cclistenAddress, config)
	if err != nil {
		logger.Errorf("Error creating GRPC server: %s", err)
		return nil, "", err
	}

	return srv, ccEndpoint, nil
}

// computeChaincodeEndpoint will utilize chaincode address, chaincode listen
// address (these two are from viper) and peer address to compute chaincode endpoint.
// There could be following cases of computing chaincode endpoint:
// Case A: if chaincodeAddrKey is set, use it if not "0.0.0.0" (or "::")
// Case B: else if chaincodeListenAddrKey is set and not "0.0.0.0" (or "::"), use it
// Case C: else use peer address if not "0.0.0.0" (or "::")
// Case D: else return error
func computeChaincodeEndpoint(chaincodeAddress string, chaincodeListenAddress string, peerHostname string) (ccEndpoint string, err error) {
	logger.Infof("Entering computeChaincodeEndpoint with peerHostname: %s", peerHostname)
	// Case A: the chaincodeAddrKey is set
	if chaincodeAddress != "" {
		host, _, err := net.SplitHostPort(chaincodeAddress)
		if err != nil {
			logger.Errorf("Fail to split chaincodeAddress: %s", err)
			return "", err
		}
		ccIP := net.ParseIP(host)
		if ccIP != nil && ccIP.IsUnspecified() {
			logger.Errorf("ChaincodeAddress' IP cannot be %s in non-dev mode", ccIP)
			return "", errors.New("invalid endpoint for chaincode to connect")
		}
		logger.Infof("Exit with ccEndpoint: %s", chaincodeAddress)
		return chaincodeAddress, nil
	}

	// Case B: chaincodeListenAddrKey is set
	if chaincodeListenAddress != "" {
		ccEndpoint = chaincodeListenAddress
		host, port, err := net.SplitHostPort(ccEndpoint)
		if err != nil {
			logger.Errorf("ChaincodeAddress is nil and fail to split chaincodeListenAddress: %s", err)
			return "", err
		}

		ccListenerIP := net.ParseIP(host)
		// ignoring other values such as Multicast address etc ...as the server
		// wouldn't start up with this address anyway
		if ccListenerIP != nil && ccListenerIP.IsUnspecified() {
			// Case C: if "0.0.0.0" or "::", we have to use peer address with the listen port
			peerIP := net.ParseIP(peerHostname)
			if peerIP != nil && peerIP.IsUnspecified() {
				// Case D: all we have is "0.0.0.0" or "::" which chaincode cannot connect to
				logger.Error("ChaincodeAddress is nil while both chaincodeListenAddressIP and peerIP are 0.0.0.0")
				return "", errors.New("invalid endpoint for chaincode to connect")
			}
			ccEndpoint = fmt.Sprintf("%s:%s", peerHostname, port)
		}
		logger.Infof("Exit with ccEndpoint: %s", ccEndpoint)
		return ccEndpoint, nil
	}

	// Case C: chaincodeListenAddrKey is not set, use peer address
	peerIP := net.ParseIP(peerHostname)
	if peerIP != nil && peerIP.IsUnspecified() {
		// Case D: all we have is "0.0.0.0" or "::" which chaincode cannot connect to
		logger.Errorf("ChaincodeAddress and chaincodeListenAddress are nil and peerIP is %s", peerIP)
		return "", errors.New("invalid endpoint for chaincode to connect")
	}

	// use peerAddress:defaultChaincodePort
	ccEndpoint = fmt.Sprintf("%s:%d", peerHostname, defaultChaincodePort)

	logger.Infof("Exit with ccEndpoint: %s", ccEndpoint)
	return ccEndpoint, nil
}

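// createDockerClient builds a Docker client for the configured VM endpoint,
// using TLS credentials when VMDockerTLSEnabled is set.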
func createDockerClient(coreConfig *peer.Config) (*docker.Client, error) {
	if coreConfig.VMDockerTLSEnabled {
		return docker.NewTLSClient(coreConfig.VMEndpoint, coreConfig.DockerCert, coreConfig.DockerKey, coreConfig.DockerCA)
	}
	return docker.NewClient(coreConfig.VMEndpoint)
}

// secureDialOpts is the callback function for secure dial options for gossip service
func secureDialOpts(credSupport *comm.CredentialSupport) func() []grpc.DialOption {
	return func() []grpc.DialOption {
		var dialOpts []grpc.DialOption
		// set max send/recv msg sizes
		dialOpts = append(
			dialOpts,
			grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(comm.MaxRecvMsgSize), grpc.MaxCallSendMsgSize(comm.MaxSendMsgSize)),
		)
		// set the keepalive options
		kaOpts := comm.DefaultKeepaliveOptions
		if viper.IsSet("peer.keepalive.client.interval") {
			kaOpts.ClientInterval = viper.GetDuration("peer.keepalive.client.interval")
		}
		if viper.IsSet("peer.keepalive.client.timeout") {
			kaOpts.ClientTimeout = viper.GetDuration("peer.keepalive.client.timeout")
		}
		dialOpts = append(dialOpts, comm.ClientKeepaliveOptions(kaOpts)...)

		if viper.GetBool("peer.tls.enabled") {
			dialOpts = append(dialOpts, grpc.WithTransportCredentials(credSupport.GetPeerCredentials()))
		} else {
			dialOpts = append(dialOpts, grpc.WithInsecure())
		}
		return dialOpts
	}
}

// initGossipService will initialize the gossip service by:
// 1. Enable TLS if configured;
// 2. Init the message crypto service;
// 3. Init the security advisor;
// 4. Init gossip related struct.
func initGossipService(
	policyMgr policies.ChannelPolicyManagerGetter,
	metricsProvider metrics.Provider,
	peerServer *comm.GRPCServer,
	signer msp.SigningIdentity,
	credSupport *comm.CredentialSupport,
	peerAddress string,
	deliverGRPCClient *comm.GRPCClient,
	deliverServiceConfig *deliverservice.DeliverServiceConfig,
	privdataConfig *gossipprivdata.PrivdataConfig,
) (*gossipservice.GossipService, error) {

	var certs *gossipcommon.TLSCertificates
	if peerServer.TLSEnabled() {
		serverCert := peerServer.ServerCertificate()
		clientCert, err := peer.GetClientCertificate()
		if err != nil {
			return nil, errors.Wrap(err, "failed obtaining client certificates")
		}
		certs = &gossipcommon.TLSCertificates{}
		certs.TLSServerCert.Store(&serverCert)
		certs.TLSClientCert.Store(&clientCert)
	}

	messageCryptoService := peergossip.NewMCS(
		policyMgr,
		signer,
		mgmt.NewDeserializersManager(factory.GetDefault()),
		factory.GetDefault(),
	)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(factory.GetDefault()))
	bootstrap := viper.GetStringSlice("peer.gossip.bootstrap")

	serviceConfig := service.GlobalConfig()
	if serviceConfig.Endpoint != "" {
		peerAddress = serviceConfig.Endpoint
	}
	gossipConfig, err := gossipgossip.GlobalConfig(peerAddress, certs, bootstrap...)
	if err != nil {
		return nil, errors.Wrap(err, "failed obtaining gossip config")
	}

	return gossipservice.New(
		signer,
		gossipmetrics.NewGossipMetrics(metricsProvider),
		peerAddress,
		peerServer.Server(),
		messageCryptoService,
		secAdv,
		secureDialOpts(credSupport),
		credSupport,
		deliverGRPCClient,
		gossipConfig,
		serviceConfig,
		privdataConfig,
		deliverServiceConfig,
	)
}

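// newOperationsSystem constructs the operations.System that serves the
// operations endpoint, metrics, and health checks, based on the peer's core
// configuration.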
func newOperationsSystem(coreConfig *peer.Config) *operations.System {
	return operations.NewSystem(operations.Options{
		Logger:        flogging.MustGetLogger("peer.operations"),
		ListenAddress: coreConfig.OperationsListenAddress,
		Metrics: operations.MetricsOptions{
			Provider: coreConfig.MetricsProvider,
			Statsd: &operations.Statsd{
				Network:       coreConfig.StatsdNetwork,
				Address:       coreConfig.StatsdAaddress,
				WriteInterval: coreConfig.StatsdWriteInterval,
				Prefix:        coreConfig.StatsdPrefix,
			},
		},
		TLS: operations.TLS{
			Enabled:            coreConfig.OperationsTLSEnabled,
			CertFile:           coreConfig.OperationsTLSCertFile,
			KeyFile:            coreConfig.OperationsTLSKeyFile,
			ClientCertRequired: coreConfig.OperationsTLSClientAuthRequired,
			ClientCACertFiles:  coreConfig.OperationsTLSClientRootCAs,
		},
		Version: metadata.Version,
	})
}

func getDockerHostConfig() *docker.HostConfig {
	dockerKey := func(key string) string { return "vm.docker.hostConfig." + key }
	getInt64 := func(key string) int64 { return int64(viper.GetInt(dockerKey(key))) }

	var logConfig docker.LogConfig
	err := viper.UnmarshalKey(dockerKey("LogConfig"), &logConfig)
	if err != nil {
		logger.Panicf("unable to parse Docker LogConfig: %s", err)
	}

	networkMode := viper.GetString(dockerKey("NetworkMode"))
	if networkMode == "" {
		networkMode = "host"
	}

	memorySwappiness := getInt64("MemorySwappiness")
	oomKillDisable := viper.GetBool(dockerKey("OomKillDisable"))

	return &docker.HostConfig{
		CapAdd:  viper.GetStringSlice(dockerKey("CapAdd")),
		CapDrop: viper.GetStringSlice(dockerKey("CapDrop")),

		DNS:         viper.GetStringSlice(dockerKey("Dns")),
		DNSSearch:   viper.GetStringSlice(dockerKey("DnsSearch")),
		ExtraHosts:  viper.GetStringSlice(dockerKey("ExtraHosts")),
		NetworkMode: networkMode,
		IpcMode:     viper.GetString(dockerKey("IpcMode")),
		PidMode:     viper.GetString(dockerKey("PidMode")),
		UTSMode:     viper.GetString(dockerKey("UTSMode")),
		LogConfig:   logConfig,

		ReadonlyRootfs:   viper.GetBool(dockerKey("ReadonlyRootfs")),
		SecurityOpt:      viper.GetStringSlice(dockerKey("SecurityOpt")),
		CgroupParent:     viper.GetString(dockerKey("CgroupParent")),
		Memory:           getInt64("Memory"),
		MemorySwap:       getInt64("MemorySwap"),
		MemorySwappiness: &memorySwappiness,
		OOMKillDisable:   &oomKillDisable,
		CPUShares:        getInt64("CpuShares"),
		CPUSet:           viper.GetString(dockerKey("Cpuset")),
		CPUSetCPUs:       viper.GetString(dockerKey("CpusetCPUs")),
		CPUSetMEMs:       viper.GetString(dockerKey("CpusetMEMs")),
		CPUQuota:         getInt64("CpuQuota"),
		CPUPeriod:        getInt64("CpuPeriod"),
		BlkioWeight:      getInt64("BlkioWeight"),
	}
}

//go:generate counterfeiter -o mock/get_ledger.go -fake-name GetLedger . getLedger
//go:generate counterfeiter -o mock/peer_ledger.go -fake-name PeerLedger . peerLedger

type peerLedger interface {
	ledger.PeerLedger
}

type getLedger func(string) ledger.PeerLedger

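// resetLoop periodically compares each rebuilding ledger's current height
// against its recorded pre-reset height; once every ledger has caught up it
// clears the pre-reset height files and re-enables endorsement by flipping
// the reset filter.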
func resetLoop(
	resetFilter *reset,
	preResetHeights map[string]uint64,
	ledgerIDs []string,
	pLedger getLedger,
	interval time.Duration,
) {
	// periodically check to see if current ledger height(s) surpass prereset height(s)
	ticker := time.NewTicker(interval)

	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			logger.Info("Ledger rebuild: Checking if current ledger heights surpass prereset ledger heights")
			logger.Debugf("Ledger rebuild: Number of ledgers still rebuilding before check: %d", len(preResetHeights))
			for cid, height := range preResetHeights {
				var l peerLedger
				l = pLedger(cid)
				if l == nil {
					logger.Warningf("No ledger found for channel [%s]", cid)
					continue
				}
				bcInfo, err := l.GetBlockchainInfo()
				if bcInfo != nil {
					logger.Debugf("Ledger rebuild: channel [%s]: currentHeight [%d] : preresetHeight [%d]", cid, bcInfo.GetHeight(), height)
					if bcInfo.GetHeight() >= height {
						delete(preResetHeights, cid)
					} else {
						break
					}
				} else {
					if err != nil {
						logger.Warningf("Ledger rebuild: could not retrieve info for channel [%s]: %s", cid, err.Error())
					}
				}
			}

			logger.Debugf("Ledger rebuild: Number of ledgers still rebuilding after check: %d", len(preResetHeights))
			if len(preResetHeights) == 0 {
				logger.Infof("Ledger rebuild: Complete, all ledgers surpass prereset heights. Endorsement request processing will be enabled.")
				rootFSPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "ledgersData")
				err := kvledger.ClearPreResetHeight(rootFSPath, ledgerIDs)
				if err != nil {
					logger.Warningf("Ledger rebuild: could not clear off prereset files: error=%s", err)
				}
				resetFilter.setReject(false)
				return
			}
		}
	}
}

// reset implements the auth.Filter interface
type reset struct {
	sync.RWMutex
	next   pb.EndorserServer
	reject bool
}

func (r *reset) setReject(reject bool) {
	r.Lock()
	defer r.Unlock()
	r.reject = reject
}

// Init initializes Reset with the next EndorserServer
func (r *reset) Init(next pb.EndorserServer) {
	r.next = next
}

// ProcessProposal processes a signed proposal
func (r *reset) ProcessProposal(ctx context.Context, signedProp *pb.SignedProposal) (*pb.ProposalResponse, error) {
	r.RLock()
	defer r.RUnlock()
	if r.reject {
		return nil, errors.New("endorse requests are blocked while ledgers are being rebuilt")
	}
	return r.next.ProcessProposal(ctx, signedProp)
}