github.com/sykesm/fabric@v1.1.0-preview.0.20200129034918-2aa12b1a0181/internal/peer/node/start.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package node

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"sync"
	"syscall"
	"time"

	docker "github.com/fsouza/go-dockerclient"
	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/common"
	cb "github.com/hyperledger/fabric-protos-go/common"
	discprotos "github.com/hyperledger/fabric-protos-go/discovery"
	pb "github.com/hyperledger/fabric-protos-go/peer"
	"github.com/hyperledger/fabric/bccsp/factory"
	"github.com/hyperledger/fabric/common/cauthdsl"
	ccdef "github.com/hyperledger/fabric/common/chaincode"
	"github.com/hyperledger/fabric/common/crypto"
	"github.com/hyperledger/fabric/common/crypto/tlsgen"
	"github.com/hyperledger/fabric/common/deliver"
	"github.com/hyperledger/fabric/common/flogging"
	floggingmetrics "github.com/hyperledger/fabric/common/flogging/metrics"
	"github.com/hyperledger/fabric/common/grpclogging"
	"github.com/hyperledger/fabric/common/grpcmetrics"
	"github.com/hyperledger/fabric/common/metadata"
	"github.com/hyperledger/fabric/common/metrics"
	"github.com/hyperledger/fabric/common/policies"
	"github.com/hyperledger/fabric/core/aclmgmt"
	"github.com/hyperledger/fabric/core/cclifecycle"
	"github.com/hyperledger/fabric/core/chaincode"
	"github.com/hyperledger/fabric/core/chaincode/accesscontrol"
	"github.com/hyperledger/fabric/core/chaincode/extcc"
	"github.com/hyperledger/fabric/core/chaincode/lifecycle"
	"github.com/hyperledger/fabric/core/chaincode/persistence"
	"github.com/hyperledger/fabric/core/chaincode/platforms"
	"github.com/hyperledger/fabric/core/comm"
	"github.com/hyperledger/fabric/core/committer/txvalidator/plugin"
	"github.com/hyperledger/fabric/core/common/ccprovider"
	"github.com/hyperledger/fabric/core/common/privdata"
	coreconfig "github.com/hyperledger/fabric/core/config"
	"github.com/hyperledger/fabric/core/container"
	"github.com/hyperledger/fabric/core/container/dockercontroller"
	"github.com/hyperledger/fabric/core/container/externalbuilder"
	"github.com/hyperledger/fabric/core/deliverservice"
	"github.com/hyperledger/fabric/core/dispatcher"
	"github.com/hyperledger/fabric/core/endorser"
	authHandler "github.com/hyperledger/fabric/core/handlers/auth"
	endorsement2 "github.com/hyperledger/fabric/core/handlers/endorsement/api"
	endorsement3 "github.com/hyperledger/fabric/core/handlers/endorsement/api/identities"
	"github.com/hyperledger/fabric/core/handlers/library"
	validation "github.com/hyperledger/fabric/core/handlers/validation/api"
	"github.com/hyperledger/fabric/core/ledger"
	"github.com/hyperledger/fabric/core/ledger/cceventmgmt"
	"github.com/hyperledger/fabric/core/ledger/kvledger"
	"github.com/hyperledger/fabric/core/ledger/ledgermgmt"
	"github.com/hyperledger/fabric/core/operations"
	"github.com/hyperledger/fabric/core/peer"
	"github.com/hyperledger/fabric/core/policy"
	"github.com/hyperledger/fabric/core/scc"
	"github.com/hyperledger/fabric/core/scc/cscc"
	"github.com/hyperledger/fabric/core/scc/lscc"
	"github.com/hyperledger/fabric/core/scc/qscc"
	"github.com/hyperledger/fabric/core/transientstore"
	"github.com/hyperledger/fabric/discovery"
	"github.com/hyperledger/fabric/discovery/endorsement"
	discsupport "github.com/hyperledger/fabric/discovery/support"
	discacl "github.com/hyperledger/fabric/discovery/support/acl"
	ccsupport "github.com/hyperledger/fabric/discovery/support/chaincode"
	"github.com/hyperledger/fabric/discovery/support/config"
	"github.com/hyperledger/fabric/discovery/support/gossip"
	gossipcommon "github.com/hyperledger/fabric/gossip/common"
	gossipgossip "github.com/hyperledger/fabric/gossip/gossip"
	gossipmetrics "github.com/hyperledger/fabric/gossip/metrics"
	"github.com/hyperledger/fabric/gossip/service"
	gossipservice "github.com/hyperledger/fabric/gossip/service"
	peergossip "github.com/hyperledger/fabric/internal/peer/gossip"
	"github.com/hyperledger/fabric/internal/peer/version"
	"github.com/hyperledger/fabric/msp"
	"github.com/hyperledger/fabric/msp/mgmt"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"google.golang.org/grpc"
)

const (
	chaincodeAddrKey       = "peer.chaincodeAddress"
	chaincodeListenAddrKey = "peer.chaincodeListenAddress"
	defaultChaincodePort   = 7052
)

var chaincodeDevMode bool

func startCmd() *cobra.Command {
	// Set the flags on the node start command.
	flags := nodeStartCmd.Flags()
	flags.BoolVarP(&chaincodeDevMode, "peer-chaincodedev", "", false, "start peer in chaincode development mode")
	return nodeStartCmd
}

var nodeStartCmd = &cobra.Command{
	Use:   "start",
	Short: "Starts the node.",
	Long:  `Starts a node that interacts with the network.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) != 0 {
			return fmt.Errorf("trailing args detected")
		}
		// Parsing of the command line is done, so silence cmd usage.
		cmd.SilenceUsage = true
		return serve(args)
	},
}

// externalVMAdapter coerces the result of Build to the container.Interface
// type expected by the VM interface.
type externalVMAdapter struct {
	detector *externalbuilder.Detector
}

func (e externalVMAdapter) Build(
	ccid string,
	mdBytes []byte,
	codePackage io.Reader,
) (container.Instance, error) {
	i, err := e.detector.Build(ccid, mdBytes, codePackage)
	if err != nil {
		return nil, err
	}

	// ensure <nil> is returned instead of (*externalbuilder.Instance)(nil)
	if i == nil {
		return nil, nil
	}
	return i, err
}

type endorserChannelAdapter struct {
	peer *peer.Peer
}

func (e endorserChannelAdapter) Channel(channelID string) *endorser.Channel {
	if peerChannel := e.peer.Channel(channelID); peerChannel != nil {
		return &endorser.Channel{
			IdentityDeserializer: peerChannel.MSPManager(),
		}
	}

	return nil
}

type custodianLauncherAdapter struct {
	launcher      chaincode.Launcher
	streamHandler extcc.StreamHandler
}

func (c custodianLauncherAdapter) Launch(ccid string) error {
	return c.launcher.Launch(ccid, c.streamHandler)
}

func (c custodianLauncherAdapter) Stop(ccid string) error {
	return c.launcher.Stop(ccid)
}

func serve(args []string) error {
	// currently the peer only works with the standard MSP
	// because in certain scenarios the MSP has to make sure
	// that from a single credential you only have a single 'identity'.
	// Idemix does not support this *YET*, but it can be easily
	// fixed to support it.
	// For now, we just make sure that the peer only comes up with
	// the standard MSP.
	mspType := mgmt.GetLocalMSP(factory.GetDefault()).GetType()
	if mspType != msp.FABRIC {
		panic("Unsupported msp type " + msp.ProviderTypeToString(mspType))
	}

	// Trace RPCs with the golang.org/x/net/trace package. This was moved out of
	// the deliver service connection factory as it has process wide implications
	// and was racy with respect to initialization of gRPC clients and servers.
	grpc.EnableTracing = true

	logger.Infof("Starting %s", version.GetInfo())

	// obtain the core configuration
	coreConfig, err := peer.GlobalConfig()
	if err != nil {
		return err
	}

	platformRegistry := platforms.NewRegistry(platforms.SupportedPlatforms...)

	identityDeserializerFactory := func(chainID string) msp.IdentityDeserializer {
		return mgmt.GetManagerForChain(chainID)
	}

	opsSystem := newOperationsSystem(coreConfig)
	err = opsSystem.Start()
	if err != nil {
		return errors.WithMessage(err, "failed to initialize operations subsystems")
	}
	defer opsSystem.Stop()

	metricsProvider := opsSystem.Provider
	logObserver := floggingmetrics.NewObserver(metricsProvider)
	flogging.SetObserver(logObserver)

	membershipInfoProvider := privdata.NewMembershipInfoProvider(createSelfSignedData(), identityDeserializerFactory)

	mspID := coreConfig.LocalMSPID

	chaincodeInstallPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "lifecycle", "chaincodes")
	ccStore := persistence.NewStore(chaincodeInstallPath)
	ccPackageParser := &persistence.ChaincodePackageParser{
		MetadataProvider: ccprovider.PersistenceAdapter(ccprovider.MetadataAsTarEntries),
	}

	peerHost, _, err := net.SplitHostPort(coreConfig.PeerAddress)
	if err != nil {
		return fmt.Errorf("peer address is not in the format of host:port: %v", err)
	}

	listenAddr := coreConfig.ListenAddress
	serverConfig, err := peer.GetServerConfig()
	if err != nil {
		logger.Fatalf("Error loading secure config for peer (%s)", err)
	}

	serverConfig.Logger = flogging.MustGetLogger("core.comm").With("server", "PeerServer")
	serverConfig.ServerStatsHandler = comm.NewServerStatsHandler(metricsProvider)
	serverConfig.UnaryInterceptors = append(
		serverConfig.UnaryInterceptors,
		grpcmetrics.UnaryServerInterceptor(grpcmetrics.NewUnaryMetrics(metricsProvider)),
		grpclogging.UnaryServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
	)
	serverConfig.StreamInterceptors = append(
		serverConfig.StreamInterceptors,
		grpcmetrics.StreamServerInterceptor(grpcmetrics.NewStreamMetrics(metricsProvider)),
		grpclogging.StreamServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
	)

	cs := comm.NewCredentialSupport()
	if serverConfig.SecOpts.UseTLS {
		logger.Info("Starting peer with TLS enabled")
		cs = comm.NewCredentialSupport(serverConfig.SecOpts.ServerRootCAs...)
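		// Note: the credential support is re-created here seeded with the peer's
		// server root CAs; secureDialOpts below hands these credentials, via
		// GetPeerCredentials, to outbound TLS-enabled gRPC connections.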

		// set the cert to use if client auth is requested by remote endpoints
		clientCert, err := peer.GetClientCertificate()
		if err != nil {
			logger.Fatalf("Failed to set TLS client certificate (%s)", err)
		}
		cs.SetClientCertificate(clientCert)
	}

	transientStoreProvider, err := transientstore.NewStoreProvider(
		filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "transientstore"),
	)
	if err != nil {
		return errors.WithMessage(err, "failed to open transient store")
	}

	deliverServiceConfig := deliverservice.GlobalConfig()

	peerInstance := &peer.Peer{
		ServerConfig:             serverConfig,
		CredentialSupport:        cs,
		StoreProvider:            transientStoreProvider,
		CryptoProvider:           factory.GetDefault(),
		OrdererEndpointOverrides: deliverServiceConfig.OrdererEndpointOverrides,
	}

	localMSP := mgmt.GetLocalMSP(factory.GetDefault())
	signingIdentity, err := localMSP.GetDefaultSigningIdentity()
	if err != nil {
		logger.Panicf("Could not get the default signing identity from the local MSP: [%+v]", err)
	}

	signingIdentityBytes, err := signingIdentity.Serialize()
	if err != nil {
		logger.Panicf("Failed to serialize the signing identity: %v", err)
	}

	expirationLogger := flogging.MustGetLogger("certmonitor")
	crypto.TrackExpiration(
		serverConfig.SecOpts.UseTLS,
		serverConfig.SecOpts.Certificate,
		cs.GetClientCertificate().Certificate,
		signingIdentityBytes,
		expirationLogger.Warnf, // This can be used to piggyback a metric event in the future
		time.Now(),
		time.AfterFunc)

	policyMgr := policies.PolicyManagerGetterFunc(peerInstance.GetPolicyManager)

	deliverGRPCClient, err := comm.NewGRPCClient(comm.ClientConfig{
		Timeout: deliverServiceConfig.ConnectionTimeout,
		KaOpts:  deliverServiceConfig.KeepaliveOptions,
		SecOpts: deliverServiceConfig.SecOpts,
	})
	if err != nil {
		logger.Panicf("Could not create the deliver grpc client: [%+v]", err)
	}

	policyChecker := policy.NewPolicyChecker(
		policies.PolicyManagerGetterFunc(peerInstance.GetPolicyManager),
		mgmt.GetLocalMSP(factory.GetDefault()),
		mgmt.NewLocalMSPPrincipalGetter(factory.GetDefault()),
	)

	// startup aclmgmt with default ACL providers (resource based and default 1.0 policies based).
	// Users can pass in their own ACLProvider to RegisterACLProvider (currently unit tests do this).
	aclProvider := aclmgmt.NewACLProvider(
		aclmgmt.ResourceGetter(peerInstance.GetStableChannelConfig),
		policyChecker,
	)

	// TODO, unfortunately, the lifecycle initialization is very unclean at the
	// moment. This is because ccprovider.SetChaincodePath only works after
	// ledgermgmt.Initialize, but ledgermgmt.Initialize requires a reference to
	// lifecycle. Finally, lscc requires a reference to the system chaincode
	// provider in order to be created, which requires chaincode support to be
	// up, which also requires, you guessed it, lifecycle. Once we remove the
	// v1.0 lifecycle, we should be good to collapse all of the init of lifecycle
	// to this point.
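
	// lifecycleResources ties the _lifecycle serializer to the channel config
	// source, the local chaincode store, and the package parser; it is shared
	// by the validator/committer, the lifecycle cache, and the _lifecycle SCC
	// constructed below.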
	lifecycleResources := &lifecycle.Resources{
		Serializer:          &lifecycle.Serializer{},
		ChannelConfigSource: peerInstance,
		ChaincodeStore:      ccStore,
		PackageParser:       ccPackageParser,
	}

	lifecycleValidatorCommitter := &lifecycle.ValidatorCommitter{
		Resources:                    lifecycleResources,
		LegacyDeployedCCInfoProvider: &lscc.DeployedCCInfoProvider{},
	}

	ccInfoFSImpl := &ccprovider.CCInfoFSImpl{GetHasher: factory.GetDefault()}

	// legacyMetadataManager collects metadata information from the legacy
	// lifecycle (lscc). This is expected to disappear with FAB-15061.
	legacyMetadataManager, err := cclifecycle.NewMetadataManager(
		cclifecycle.EnumerateFunc(
			func() ([]ccdef.InstalledChaincode, error) {
				return ccInfoFSImpl.ListInstalledChaincodes(ccInfoFSImpl.GetChaincodeInstallPath(), ioutil.ReadDir, ccprovider.LoadPackage)
			},
		),
	)
	if err != nil {
		logger.Panicf("Failed creating LegacyMetadataManager: %+v", err)
	}

	// metadataManager aggregates metadata information from _lifecycle and
	// the legacy lifecycle (lscc).
	metadataManager := lifecycle.NewMetadataManager()

	// The purpose of these two managers is to feed per-channel chaincode data
	// into gossip. Because we are transitioning from lscc to _lifecycle, we
	// still have two providers of such information until v2.1, at which point
	// the legacy provider will be removed.
	//
	// The flow of information is the following:
	//
	//   gossip <-- metadataManager <-- lifecycleCache        (for _lifecycle)
	//                              \
	//                               -- legacyMetadataManager (for lscc)
	//
	// FAB-15061 tracks the work necessary to remove LSCC, at which point we
	// will be able to simplify the flow to simply be
	//
	//   gossip <-- lifecycleCache

	chaincodeCustodian := lifecycle.NewChaincodeCustodian()

	externalBuilderOutput := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "externalbuilder", "builds")
	if err := os.MkdirAll(externalBuilderOutput, 0700); err != nil {
		logger.Panicf("could not create externalbuilder build output dir: %s", err)
	}

	ebMetadataProvider := &externalbuilder.MetadataProvider{
		DurablePath: externalBuilderOutput,
	}

	lifecycleCache := lifecycle.NewCache(lifecycleResources, mspID, metadataManager, chaincodeCustodian, ebMetadataProvider)

	txProcessors := map[common.HeaderType]ledger.CustomTxProcessor{
		common.HeaderType_CONFIG: &peer.ConfigTxProcessor{},
	}

	peerInstance.LedgerMgr = ledgermgmt.NewLedgerMgr(
		&ledgermgmt.Initializer{
			CustomTxProcessors:              txProcessors,
			DeployedChaincodeInfoProvider:   lifecycleValidatorCommitter,
			MembershipInfoProvider:          membershipInfoProvider,
			ChaincodeLifecycleEventProvider: lifecycleCache,
			MetricsProvider:                 metricsProvider,
			HealthCheckRegistry:             opsSystem,
			StateListeners:                  []ledger.StateListener{lifecycleCache},
			Config:                          ledgerConfig(),
			Hasher:                          factory.GetDefault(),
			EbMetadataProvider:              ebMetadataProvider,
		},
	)

	peerServer, err := comm.NewGRPCServer(listenAddr, serverConfig)
	if err != nil {
		logger.Fatalf("Failed to create peer server (%s)", err)
	}

	// FIXME: Creating the gossip service has the side effect of starting a bunch
	// of go routines and registering with the grpc server.
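	// initGossipService (defined below) builds the message crypto service and
	// security advisor from the local MSP and registers the gossip service on
	// peerServer's gRPC server.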
	gossipService, err := initGossipService(
		policyMgr,
		metricsProvider,
		peerServer,
		signingIdentity,
		cs,
		coreConfig.PeerAddress,
		deliverGRPCClient,
		deliverServiceConfig,
	)
	if err != nil {
		return errors.WithMessage(err, "failed to initialize gossip service")
	}
	defer gossipService.Stop()

	peerInstance.GossipService = gossipService

	// Configure CC package storage
	lsccInstallPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "chaincodes")
	ccprovider.SetChaincodesPath(lsccInstallPath)

	if err := lifecycleCache.InitializeLocalChaincodes(); err != nil {
		return errors.WithMessage(err, "could not initialize local chaincodes")
	}

	// Parameter overrides must be processed before any parameters are
	// cached. Failures to cache cause the server to terminate immediately.
	if chaincodeDevMode {
		logger.Info("Running in chaincode development mode")
		logger.Info("Disable loading validity system chaincode")

		viper.Set("chaincode.mode", chaincode.DevModeUserRunsChaincode)
	}

	mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
	policyCheckerProvider := func(resourceName string) deliver.PolicyCheckerFunc {
		return func(env *cb.Envelope, channelID string) error {
			return aclProvider.CheckACL(resourceName, channelID, env)
		}
	}

	metrics := deliver.NewMetrics(metricsProvider)
	abServer := &peer.DeliverServer{
		DeliverHandler: deliver.NewHandler(
			&peer.DeliverChainManager{Peer: peerInstance},
			coreConfig.AuthenticationTimeWindow,
			mutualTLS,
			metrics,
			false,
		),
		PolicyCheckerProvider: policyCheckerProvider,
	}
	pb.RegisterDeliverServer(peerServer.Server(), abServer)

	// Create a self-signed CA for chaincode service
	ca, err := tlsgen.NewCA()
	if err != nil {
		logger.Panic("Failed creating authentication layer:", err)
	}
	ccSrv, ccEndpoint, err := createChaincodeServer(coreConfig, ca, peerHost)
	if err != nil {
		logger.Panicf("Failed to create chaincode server: %s", err)
	}

	// get user mode
	userRunsCC := chaincode.IsDevMode()
	tlsEnabled := coreConfig.PeerTLSEnabled

	// create chaincode specific tls CA
	authenticator := accesscontrol.NewAuthenticator(ca)

	chaincodeHandlerRegistry := chaincode.NewHandlerRegistry(userRunsCC)
	lifecycleTxQueryExecutorGetter := &chaincode.TxQueryExecutorGetter{
		CCID:            scc.ChaincodeID(lifecycle.LifecycleNamespace),
		HandlerRegistry: chaincodeHandlerRegistry,
	}

	if coreConfig.VMEndpoint == "" && len(coreConfig.ExternalBuilders) == 0 {
		logger.Panic("VMEndpoint not set and no ExternalBuilders defined")
	}

	chaincodeConfig := chaincode.GlobalConfig()

	var client *docker.Client
	var dockerVM *dockercontroller.DockerVM
	if coreConfig.VMEndpoint != "" {
		client, err = createDockerClient(coreConfig)
		if err != nil {
			logger.Panicf("cannot create docker client: %s", err)
		}

		dockerVM = &dockercontroller.DockerVM{
			PeerID:        coreConfig.PeerID,
			NetworkID:     coreConfig.NetworkID,
			BuildMetrics:  dockercontroller.NewBuildMetrics(opsSystem.Provider),
			Client:        client,
			AttachStdOut:  coreConfig.VMDockerAttachStdout,
			HostConfig:    getDockerHostConfig(),
			ChaincodePull: coreConfig.ChaincodePull,
			NetworkMode:   coreConfig.VMNetworkMode,
			PlatformBuilder: &platforms.Builder{
				Registry: platformRegistry,
				Client:   client,
			},
			// This field is superfluous for chaincodes built with v2.0+ binaries;
			// however, it is kept for now so users are not forced to rebuild.
			// It should be removed in the future.
			LoggingEnv: []string{
				"CORE_CHAINCODE_LOGGING_LEVEL=" + chaincodeConfig.LogLevel,
				"CORE_CHAINCODE_LOGGING_SHIM=" + chaincodeConfig.ShimLogLevel,
				"CORE_CHAINCODE_LOGGING_FORMAT=" + chaincodeConfig.LogFormat,
			},
			MSPID: mspID,
		}
		if err := opsSystem.RegisterChecker("docker", dockerVM); err != nil {
			logger.Panicf("failed to register docker health check: %s", err)
		}
	}

	externalVM := &externalbuilder.Detector{
		Builders:    externalbuilder.CreateBuilders(coreConfig.ExternalBuilders, mspID),
		DurablePath: externalBuilderOutput,
	}

	buildRegistry := &container.BuildRegistry{}

	containerRouter := &container.Router{
		DockerBuilder:   dockerVM,
		ExternalBuilder: externalVMAdapter{externalVM},
		PackageProvider: &persistence.FallbackPackageLocator{
			ChaincodePackageLocator: &persistence.ChaincodePackageLocator{
				ChaincodeDir: chaincodeInstallPath,
			},
			LegacyCCPackageLocator: &ccprovider.CCInfoFSImpl{GetHasher: factory.GetDefault()},
		},
	}

	builtinSCCs := map[string]struct{}{
		"lscc":       {},
		"qscc":       {},
		"cscc":       {},
		"_lifecycle": {},
	}

	lsccInst := &lscc.SCC{
		BuiltinSCCs: builtinSCCs,
		Support: &lscc.SupportImpl{
			GetMSPIDs: peerInstance.GetMSPIDs,
		},
		SCCProvider:        &lscc.PeerShim{Peer: peerInstance},
		ACLProvider:        aclProvider,
		GetMSPIDs:          peerInstance.GetMSPIDs,
		PolicyChecker:      policyChecker,
		BCCSP:              factory.GetDefault(),
		BuildRegistry:      buildRegistry,
		ChaincodeBuilder:   containerRouter,
		EbMetadataProvider: ebMetadataProvider,
	}

	chaincodeEndorsementInfo := &lifecycle.ChaincodeEndorsementInfoSource{
		LegacyImpl:  lsccInst,
		Resources:   lifecycleResources,
		Cache:       lifecycleCache,
		BuiltinSCCs: builtinSCCs,
	}

	containerRuntime := &chaincode.ContainerRuntime{
		BuildRegistry:   buildRegistry,
		ContainerRouter: containerRouter,
	}

	lifecycleFunctions := &lifecycle.ExternalFunctions{
		Resources:                 lifecycleResources,
		InstallListener:           lifecycleCache,
		InstalledChaincodesLister: lifecycleCache,
		ChaincodeBuilder:          containerRouter,
		BuildRegistry:             buildRegistry,
	}

	lifecycleSCC := &lifecycle.SCC{
		Dispatcher: &dispatcher.Dispatcher{
			Protobuf: &dispatcher.ProtobufImpl{},
		},
		DeployedCCInfoProvider: lifecycleValidatorCommitter,
		QueryExecutorProvider:  lifecycleTxQueryExecutorGetter,
		Functions:              lifecycleFunctions,
		OrgMSPID:               mspID,
		ChannelConfigSource:    peerInstance,
		ACLProvider:            aclProvider,
	}

	chaincodeLauncher := &chaincode.RuntimeLauncher{
		Metrics:           chaincode.NewLaunchMetrics(opsSystem.Provider),
		Registry:          chaincodeHandlerRegistry,
		Runtime:           containerRuntime,
		StartupTimeout:    chaincodeConfig.StartupTimeout,
		CertGenerator:     authenticator,
		CACert:            ca.CertBytes(),
		PeerAddress:       ccEndpoint,
		ConnectionHandler: &extcc.ExternalChaincodeRuntime{},
	}

	// Keep TestQueries working
	if !chaincodeConfig.TLSEnabled {
		chaincodeLauncher.CertGenerator = nil
	}

	chaincodeSupport := &chaincode.ChaincodeSupport{
		ACLProvider:            aclProvider,
		AppConfig:              peerInstance,
		DeployedCCInfoProvider: lifecycleValidatorCommitter,
		ExecuteTimeout:         chaincodeConfig.ExecuteTimeout,
		InstallTimeout:         chaincodeConfig.InstallTimeout,
		HandlerRegistry:        chaincodeHandlerRegistry,
		HandlerMetrics:         chaincode.NewHandlerMetrics(opsSystem.Provider),
		Keepalive:              chaincodeConfig.Keepalive,
		Launcher:               chaincodeLauncher,
		Lifecycle:              chaincodeEndorsementInfo,
		Peer:                   peerInstance,
		Runtime:                containerRuntime,
		BuiltinSCCs:            builtinSCCs,
		TotalQueryLimit:        chaincodeConfig.TotalQueryLimit,
		UserRunsCC:             userRunsCC,
	}

	custodianLauncher := custodianLauncherAdapter{
		launcher:      chaincodeLauncher,
		streamHandler: chaincodeSupport,
	}
	go chaincodeCustodian.Work(buildRegistry, containerRouter, custodianLauncher)

	ccSupSrv := pb.ChaincodeSupportServer(chaincodeSupport)
	if tlsEnabled {
		ccSupSrv = authenticator.Wrap(ccSupSrv)
	}

	csccInst := cscc.New(
		aclProvider,
		lifecycleValidatorCommitter,
		lsccInst,
		lifecycleValidatorCommitter,
		policyChecker,
		peerInstance,
		factory.GetDefault(),
	)
	qsccInst := scc.SelfDescribingSysCC(qscc.New(aclProvider, peerInstance))
	if maxConcurrency := coreConfig.LimitsConcurrencyQSCC; maxConcurrency != 0 {
		qsccInst = scc.Throttle(maxConcurrency, qsccInst)
	}

	pb.RegisterChaincodeSupportServer(ccSrv.Server(), ccSupSrv)

	// start the chaincode specific gRPC listening service
	go ccSrv.Start()

	logger.Debugf("Running peer")

	libConf, err := library.LoadConfig()
	if err != nil {
		return errors.WithMessage(err, "could not decode peer handlers configuration")
	}

	reg := library.InitRegistry(libConf)

	authFilters := reg.Lookup(library.Auth).([]authHandler.Filter)
	endorserSupport := &endorser.SupportImpl{
		SignerSerializer: signingIdentity,
		Peer:             peerInstance,
		ChaincodeSupport: chaincodeSupport,
		ACLProvider:      aclProvider,
		BuiltinSCCs:      builtinSCCs,
	}
	endorsementPluginsByName := reg.Lookup(library.Endorsement).(map[string]endorsement2.PluginFactory)
	validationPluginsByName := reg.Lookup(library.Validation).(map[string]validation.PluginFactory)
	signingIdentityFetcher := (endorsement3.SigningIdentityFetcher)(endorserSupport)
	channelStateRetriever := endorser.ChannelStateRetriever(endorserSupport)
	pluginMapper := endorser.MapBasedPluginMapper(endorsementPluginsByName)
	pluginEndorser := endorser.NewPluginEndorser(&endorser.PluginSupport{
		ChannelStateRetriever:   channelStateRetriever,
		TransientStoreRetriever: peerInstance,
		PluginMapper:            pluginMapper,
		SigningIdentityFetcher:  signingIdentityFetcher,
	})
	endorserSupport.PluginEndorser = pluginEndorser
	channelFetcher := endorserChannelAdapter{
		peer: peerInstance,
	}
	serverEndorser := &endorser.Endorser{
		PrivateDataDistributor: gossipService,
		ChannelFetcher:         channelFetcher,
		LocalMSP:               localMSP,
		Support:                endorserSupport,
		Metrics:                endorser.NewMetrics(metricsProvider),
	}

	// deploy system chaincodes
	for _, cc := range []scc.SelfDescribingSysCC{lsccInst, csccInst, qsccInst, lifecycleSCC} {
		if enabled, ok := chaincodeConfig.SCCWhitelist[cc.Name()]; !ok || !enabled {
			logger.Infof("not deploying chaincode %s as it is not enabled", cc.Name())
			continue
		}
		scc.DeploySysCC(cc, chaincodeSupport)
	}

	logger.Infof("Deployed system chaincodes")

	// register metadataManager as a listener on legacyMetadataManager so that it
	// receives updates from the legacy lifecycle (lscc); metadataManager will
	// aggregate these updates with the ones from the new lifecycle and deliver both.
	// This is expected to disappear with FAB-15061.
	legacyMetadataManager.AddListener(metadataManager)

	// register gossip as a listener for updates from metadataManager
	metadataManager.AddListener(lifecycle.HandleMetadataUpdateFunc(func(channel string, chaincodes ccdef.MetadataSet) {
		gossipService.UpdateChaincodes(chaincodes.AsChaincodes(), gossipcommon.ChannelID(channel))
	}))

	// this brings up all the channels
	peerInstance.Initialize(
		func(cid string) {
			// initialize the metadata for this channel.
			// This call will pre-populate chaincode information for this
			// channel, but it won't fire any updates to its listeners.
			lifecycleCache.InitializeMetadata(cid)

			// initialize the legacyMetadataManager for this channel.
			// This call will pre-populate chaincode information from
			// the legacy lifecycle for this channel; it will also fire
			// the listener, which will cascade to metadataManager
			// and eventually to gossip to pre-populate data structures.
			// This is expected to disappear with FAB-15061.
			sub, err := legacyMetadataManager.NewChannelSubscription(cid, cclifecycle.QueryCreatorFunc(func() (cclifecycle.Query, error) {
				return peerInstance.GetLedger(cid).NewQueryExecutor()
			}))
			if err != nil {
				logger.Panicf("Failed subscribing to chaincode lifecycle updates")
			}

			// register this channel's legacyMetadataManager subscription (sub)
			// to get ledger updates.
			// This is expected to disappear with FAB-15061.
			cceventmgmt.GetMgr().Register(cid, sub)
		},
		peerServer,
		plugin.MapBasedMapper(validationPluginsByName),
		lifecycleValidatorCommitter,
		lsccInst,
		lifecycleValidatorCommitter,
		coreConfig.ValidatorPoolSize,
	)

	if coreConfig.DiscoveryEnabled {
		registerDiscoveryService(
			coreConfig,
			peerInstance,
			peerServer,
			policyMgr,
			lifecycle.NewMetadataProvider(
				lifecycleCache,
				legacyMetadataManager,
				peerInstance,
			),
			gossipService,
		)
	}

	logger.Infof("Starting peer with ID=[%s], network ID=[%s], address=[%s]", coreConfig.PeerID, coreConfig.NetworkID, coreConfig.PeerAddress)

	// Get configuration before starting go routines to avoid
	// racing in tests
	profileEnabled := coreConfig.ProfileEnabled
	profileListenAddress := coreConfig.ProfileListenAddress

	// Start the grpc server. Done in a goroutine so we can deploy the
	// genesis block if needed.
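	// The serve channel carries the terminal result: nil when a SIGINT/SIGTERM
	// handler fires, or the error returned when the gRPC server exits. This
	// function blocks on the channel at the end.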
	serve := make(chan error)

	// Start profiling http endpoint if enabled
	if profileEnabled {
		go func() {
			logger.Infof("Starting profiling server with listenAddress = %s", profileListenAddress)
			if profileErr := http.ListenAndServe(profileListenAddress, nil); profileErr != nil {
				logger.Errorf("Error starting profiler: %s", profileErr)
			}
		}()
	}

	go handleSignals(addPlatformSignals(map[os.Signal]func(){
		syscall.SIGINT:  func() { serve <- nil },
		syscall.SIGTERM: func() { serve <- nil },
	}))

	logger.Infof("Started peer with ID=[%s], network ID=[%s], address=[%s]", coreConfig.PeerID, coreConfig.NetworkID, coreConfig.PeerAddress)

	// get a list of ledger IDs and load preResetHeight files for these ledger IDs
	ledgerIDs, err := peerInstance.LedgerMgr.GetLedgerIDs()
	if err != nil {
		return errors.WithMessage(err, "failed to get ledger IDs")
	}

	// check to see if the peer ledgers have been reset
	rootFSPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "ledgersData")
	preResetHeights, err := kvledger.LoadPreResetHeight(rootFSPath, ledgerIDs)
	if err != nil {
		return fmt.Errorf("error loading prereset height: %s", err)
	}

	for cid, height := range preResetHeights {
		logger.Infof("Ledger rebuild: channel [%s]: preresetHeight: [%d]", cid, height)
	}

	if len(preResetHeights) > 0 {
		logger.Info("Ledger rebuild: Entering loop to check if current ledger heights surpass prereset ledger heights. Endorsement request processing will be disabled.")
		resetFilter := &reset{
			reject: true,
		}
		authFilters = append(authFilters, resetFilter)
		go resetLoop(resetFilter, preResetHeights, ledgerIDs, peerInstance.GetLedger, 10*time.Second)
	}

	// start the peer server
	auth := authHandler.ChainFilters(serverEndorser, authFilters...)
	// Register the Endorser server
	pb.RegisterEndorserServer(peerServer.Server(), auth)

	go func() {
		var grpcErr error
		if grpcErr = peerServer.Start(); grpcErr != nil {
			grpcErr = fmt.Errorf("grpc server exited with error: %s", grpcErr)
		}
		serve <- grpcErr
	}()

	// Block until grpc server exits
	return <-serve
}

func handleSignals(handlers map[os.Signal]func()) {
	var signals []os.Signal
	for sig := range handlers {
		signals = append(signals, sig)
	}

	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, signals...)
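
	// Block indefinitely, dispatching each received signal to its registered
	// handler.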
	for sig := range signalChan {
		logger.Infof("Received signal: %d (%s)", sig, sig)
		handlers[sig]()
	}
}

func localPolicy(policyObject proto.Message) policies.Policy {
	localMSP := mgmt.GetLocalMSP(factory.GetDefault())
	pp := cauthdsl.NewPolicyProvider(localMSP)
	policy, _, err := pp.NewPolicy(protoutil.MarshalOrPanic(policyObject))
	if err != nil {
		logger.Panicf("Failed creating local policy: %+v", err)
	}
	return policy
}

func createSelfSignedData() protoutil.SignedData {
	sID := mgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
	msg := make([]byte, 32)
	sig, err := sID.Sign(msg)
	if err != nil {
		logger.Panicf("Failed creating self signed data because message signing failed: %v", err)
	}
	peerIdentity, err := sID.Serialize()
	if err != nil {
		logger.Panicf("Failed creating self signed data because peer identity couldn't be serialized: %v", err)
	}
	return protoutil.SignedData{
		Data:      msg,
		Signature: sig,
		Identity:  peerIdentity,
	}
}

func registerDiscoveryService(
	coreConfig *peer.Config,
	peerInstance *peer.Peer,
	peerServer *comm.GRPCServer,
	polMgr policies.ChannelPolicyManagerGetter,
	metadataProvider *lifecycle.MetadataProvider,
	gossipService *gossipservice.GossipService,
) {
	mspID := coreConfig.LocalMSPID
	localAccessPolicy := localPolicy(cauthdsl.SignedByAnyAdmin([]string{mspID}))
	if coreConfig.DiscoveryOrgMembersAllowed {
		localAccessPolicy = localPolicy(cauthdsl.SignedByAnyMember([]string{mspID}))
	}
	channelVerifier := discacl.NewChannelVerifier(policies.ChannelApplicationWriters, polMgr)
	acl := discacl.NewDiscoverySupport(channelVerifier, localAccessPolicy, discacl.ChannelConfigGetterFunc(peerInstance.GetStableChannelConfig))
	gSup := gossip.NewDiscoverySupport(gossipService)
	ccSup := ccsupport.NewDiscoverySupport(metadataProvider)
	ea := endorsement.NewEndorsementAnalyzer(gSup, ccSup, acl, metadataProvider)
	confSup := config.NewDiscoverySupport(config.CurrentConfigBlockGetterFunc(func(channelID string) *common.Block {
		channel := peerInstance.Channel(channelID)
		if channel == nil {
			return nil
		}
		block, err := peer.ConfigBlockFromLedger(channel.Ledger())
		if err != nil {
			logger.Error("failed to get config block", err)
			return nil
		}
		return block
	}))
	support := discsupport.NewDiscoverySupport(acl, gSup, ea, confSup, acl)
	svc := discovery.NewService(discovery.Config{
		TLS:                          peerServer.TLSEnabled(),
		AuthCacheEnabled:             coreConfig.DiscoveryAuthCacheEnabled,
		AuthCacheMaxSize:             coreConfig.DiscoveryAuthCacheMaxSize,
		AuthCachePurgeRetentionRatio: coreConfig.DiscoveryAuthCachePurgeRetentionRatio,
	}, support)
	logger.Info("Discovery service activated")
	discprotos.RegisterDiscoveryServer(peerServer.Server(), svc)
}

// createChaincodeServer creates a CC listener using peer.chaincodeListenAddress
// (and, if that's not set, peer.peerAddress).
func createChaincodeServer(coreConfig *peer.Config, ca tlsgen.CA, peerHostname string) (srv *comm.GRPCServer, ccEndpoint string, err error) {
	// before potentially setting chaincodeListenAddress, compute the chaincode endpoint first
	ccEndpoint, err = computeChaincodeEndpoint(coreConfig.ChaincodeAddress, coreConfig.ChaincodeListenAddress, peerHostname)
	if err != nil {
		if chaincode.IsDevMode() {
			// in dev mode, fall back to 0.0.0.0:7052 on any error
			ccEndpoint = fmt.Sprintf("%s:%d", "0.0.0.0", defaultChaincodePort)
			logger.Warningf("use %s as chaincode endpoint because of error in computeChaincodeEndpoint: %s", ccEndpoint, err)
		} else {
			// in non-dev mode, we have to return the error
			logger.Errorf("Error computing chaincode endpoint: %s", err)
			return nil, "", err
		}
	}

	host, _, err := net.SplitHostPort(ccEndpoint)
	if err != nil {
		logger.Panic("Chaincode service host", ccEndpoint, "isn't a valid hostname:", err)
	}

	cclistenAddress := coreConfig.ChaincodeListenAddress
	if cclistenAddress == "" {
		cclistenAddress = fmt.Sprintf("%s:%d", peerHostname, defaultChaincodePort)
		logger.Warningf("%s is not set, using %s", chaincodeListenAddrKey, cclistenAddress)
		coreConfig.ChaincodeListenAddress = cclistenAddress
	}

	config, err := peer.GetServerConfig()
	if err != nil {
		logger.Errorf("Error getting server config: %s", err)
		return nil, "", err
	}

	// set the logger for the server
	config.Logger = flogging.MustGetLogger("core.comm").With("server", "ChaincodeServer")

	// Override TLS configuration if TLS is applicable
	if config.SecOpts.UseTLS {
		// Create a self-signed TLS certificate with a SAN that matches the computed chaincode endpoint
		certKeyPair, err := ca.NewServerCertKeyPair(host)
		if err != nil {
			logger.Panicf("Failed generating TLS certificate for chaincode service: %+v", err)
		}
		config.SecOpts = comm.SecureOptions{
			UseTLS: true,
			// Require chaincode shim to authenticate itself
			RequireClientCert: true,
			// Trust only client certificates signed by ourselves
			ClientRootCAs: [][]byte{ca.CertBytes()},
			// Use our own self-signed TLS certificate and key
			Certificate: certKeyPair.Cert,
			Key:         certKeyPair.Key,
			// No point in specifying server root CAs since this TLS config is only used for
			// a gRPC server and not a client
			ServerRootCAs: nil,
		}
	}

	// Chaincode keepalive options - static for now
	chaincodeKeepaliveOptions := comm.KeepaliveOptions{
		ServerInterval:    time.Duration(2) * time.Hour,    // 2 hours - gRPC default
		ServerTimeout:     time.Duration(20) * time.Second, // 20 sec - gRPC default
		ServerMinInterval: time.Duration(1) * time.Minute,  // match ClientInterval
	}
	config.KaOpts = chaincodeKeepaliveOptions
	config.HealthCheckEnabled = true

	srv, err = comm.NewGRPCServer(cclistenAddress, config)
	if err != nil {
		logger.Errorf("Error creating GRPC server: %s", err)
		return nil, "", err
	}

	return srv, ccEndpoint, nil
}

// computeChaincodeEndpoint uses the chaincode address, the chaincode listen
// address (both from viper), and the peer address to compute the chaincode
// endpoint. The following cases are possible:
//   Case A: if chaincodeAddrKey is set, use it unless it is "0.0.0.0" (or "::")
//   Case B: else if chaincodeListenAddrKey is set and not "0.0.0.0" (or "::"), use it
//   Case C: else use the peer address if it is not "0.0.0.0" (or "::")
//   Case D: else return an error
func computeChaincodeEndpoint(chaincodeAddress string, chaincodeListenAddress string, peerHostname string) (ccEndpoint string, err error) {
	logger.Infof("Entering computeChaincodeEndpoint with peerHostname: %s", peerHostname)
	// Case A: the chaincodeAddrKey is set
	if chaincodeAddress != "" {
		host, _, err := net.SplitHostPort(chaincodeAddress)
		if err != nil {
			logger.Errorf("Failed to split chaincodeAddress: %s", err)
			return "", err
		}
		ccIP := net.ParseIP(host)
		if ccIP != nil && ccIP.IsUnspecified() {
			logger.Errorf("ChaincodeAddress' IP cannot be %s in non-dev mode", ccIP)
			return "", errors.New("invalid endpoint for chaincode to connect")
		}
		logger.Infof("Exit with ccEndpoint: %s", chaincodeAddress)
		return chaincodeAddress, nil
	}

	// Case B: chaincodeListenAddrKey is set
	if chaincodeListenAddress != "" {
		ccEndpoint = chaincodeListenAddress
		host, port, err := net.SplitHostPort(ccEndpoint)
		if err != nil {
			logger.Errorf("ChaincodeAddress is not set and failed to split chaincodeListenAddress: %s", err)
			return "", err
		}

		ccListenerIP := net.ParseIP(host)
		// ignoring other values such as multicast addresses etc., as the server
		// wouldn't start up with such an address anyway
		if ccListenerIP != nil && ccListenerIP.IsUnspecified() {
			// Case C: if "0.0.0.0" or "::", we have to use the peer address with the listen port
			peerIP := net.ParseIP(peerHostname)
			if peerIP != nil && peerIP.IsUnspecified() {
				// Case D: all we have is "0.0.0.0" or "::", which chaincode cannot connect to
				logger.Error("ChaincodeAddress is not set while both chaincodeListenAddressIP and peerIP are 0.0.0.0")
				return "", errors.New("invalid endpoint for chaincode to connect")
			}
			ccEndpoint = fmt.Sprintf("%s:%s", peerHostname, port)
		}
		logger.Infof("Exit with ccEndpoint: %s", ccEndpoint)
		return ccEndpoint, nil
	}

	// Case C: chaincodeListenAddrKey is not set, use the peer address
	peerIP := net.ParseIP(peerHostname)
	if peerIP != nil && peerIP.IsUnspecified() {
		// Case D: all we have is "0.0.0.0" or "::", which chaincode cannot connect to
		logger.Errorf("ChaincodeAddress and chaincodeListenAddress are not set and peerIP is %s", peerIP)
		return "", errors.New("invalid endpoint for chaincode to connect")
	}

	// use peerAddress:defaultChaincodePort
	ccEndpoint = fmt.Sprintf("%s:%d", peerHostname, defaultChaincodePort)

	logger.Infof("Exit with ccEndpoint: %s", ccEndpoint)
	return ccEndpoint, nil
}

func createDockerClient(coreConfig *peer.Config) (*docker.Client, error) {
	if coreConfig.VMDockerTLSEnabled {
		return docker.NewTLSClient(coreConfig.VMEndpoint, coreConfig.DockerCert, coreConfig.DockerKey, coreConfig.DockerCA)
	}
	return docker.NewClient(coreConfig.VMEndpoint)
}

// secureDialOpts is the callback function providing secure dial options for the gossip service
func secureDialOpts(credSupport *comm.CredentialSupport) func() []grpc.DialOption {
	return func() []grpc.DialOption {
		var dialOpts []grpc.DialOption
		// set max send/recv msg sizes
		dialOpts = append(
			dialOpts,
			grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(comm.MaxRecvMsgSize), grpc.MaxCallSendMsgSize(comm.MaxSendMsgSize)),
		)
		// set the keepalive options
		kaOpts := comm.DefaultKeepaliveOptions
		if viper.IsSet("peer.keepalive.client.interval") {
			kaOpts.ClientInterval = viper.GetDuration("peer.keepalive.client.interval")
		}
		if viper.IsSet("peer.keepalive.client.timeout") {
			kaOpts.ClientTimeout = viper.GetDuration("peer.keepalive.client.timeout")
		}
		dialOpts = append(dialOpts, comm.ClientKeepaliveOptions(kaOpts)...)

		if viper.GetBool("peer.tls.enabled") {
			dialOpts = append(dialOpts, grpc.WithTransportCredentials(credSupport.GetPeerCredentials()))
		} else {
			dialOpts = append(dialOpts, grpc.WithInsecure())
		}
		return dialOpts
	}
}

// initGossipService initializes the gossip service by:
// 1. enabling TLS if configured;
// 2. initializing the message crypto service;
// 3. initializing the security advisor;
// 4. initializing the gossip related structs.
func initGossipService(
	policyMgr policies.ChannelPolicyManagerGetter,
	metricsProvider metrics.Provider,
	peerServer *comm.GRPCServer,
	signer msp.SigningIdentity,
	credSupport *comm.CredentialSupport,
	peerAddress string,
	deliverGRPCClient *comm.GRPCClient,
	deliverServiceConfig *deliverservice.DeliverServiceConfig,
) (*gossipservice.GossipService, error) {
	var certs *gossipcommon.TLSCertificates
	if peerServer.TLSEnabled() {
		serverCert := peerServer.ServerCertificate()
		clientCert, err := peer.GetClientCertificate()
		if err != nil {
			return nil, errors.Wrap(err, "failed obtaining client certificates")
		}
		certs = &gossipcommon.TLSCertificates{}
		certs.TLSServerCert.Store(&serverCert)
		certs.TLSClientCert.Store(&clientCert)
	}

	messageCryptoService := peergossip.NewMCS(
		policyMgr,
		signer,
		mgmt.NewDeserializersManager(factory.GetDefault()),
		factory.GetDefault(),
	)
	secAdv := peergossip.NewSecurityAdvisor(mgmt.NewDeserializersManager(factory.GetDefault()))
	bootstrap := viper.GetStringSlice("peer.gossip.bootstrap")

	serviceConfig := service.GlobalConfig()
	if serviceConfig.Endpoint != "" {
		peerAddress = serviceConfig.Endpoint
	}
	gossipConfig, err := gossipgossip.GlobalConfig(peerAddress, certs, bootstrap...)
	if err != nil {
		return nil, errors.Wrap(err, "failed obtaining gossip config")
	}

	return gossipservice.New(
		signer,
		gossipmetrics.NewGossipMetrics(metricsProvider),
		peerAddress,
		peerServer.Server(),
		messageCryptoService,
		secAdv,
		secureDialOpts(credSupport),
		credSupport,
		deliverGRPCClient,
		gossipConfig,
		serviceConfig,
		deliverServiceConfig,
	)
}

func newOperationsSystem(coreConfig *peer.Config) *operations.System {
	return operations.NewSystem(operations.Options{
		Logger:        flogging.MustGetLogger("peer.operations"),
		ListenAddress: coreConfig.OperationsListenAddress,
		Metrics: operations.MetricsOptions{
			Provider: coreConfig.MetricsProvider,
			Statsd: &operations.Statsd{
				Network:       coreConfig.StatsdNetwork,
				Address:       coreConfig.StatsdAaddress,
				WriteInterval: coreConfig.StatsdWriteInterval,
				Prefix:        coreConfig.StatsdPrefix,
			},
		},
		TLS: operations.TLS{
			Enabled:            coreConfig.OperationsTLSEnabled,
			CertFile:           coreConfig.OperationsTLSCertFile,
			KeyFile:            coreConfig.OperationsTLSKeyFile,
			ClientCertRequired: coreConfig.OperationsTLSClientAuthRequired,
			ClientCACertFiles:  coreConfig.OperationsTLSClientRootCAs,
		},
		Version: metadata.Version,
	})
}

func getDockerHostConfig() *docker.HostConfig {
	dockerKey := func(key string) string { return "vm.docker.hostConfig." + key }
	getInt64 := func(key string) int64 { return int64(viper.GetInt(dockerKey(key))) }

	var logConfig docker.LogConfig
	err := viper.UnmarshalKey(dockerKey("LogConfig"), &logConfig)
	if err != nil {
		logger.Panicf("unable to parse Docker LogConfig: %s", err)
	}

	networkMode := viper.GetString(dockerKey("NetworkMode"))
	if networkMode == "" {
		networkMode = "host"
	}

	memorySwappiness := getInt64("MemorySwappiness")
	oomKillDisable := viper.GetBool(dockerKey("OomKillDisable"))

	return &docker.HostConfig{
		CapAdd:  viper.GetStringSlice(dockerKey("CapAdd")),
		CapDrop: viper.GetStringSlice(dockerKey("CapDrop")),

		DNS:         viper.GetStringSlice(dockerKey("Dns")),
		DNSSearch:   viper.GetStringSlice(dockerKey("DnsSearch")),
		ExtraHosts:  viper.GetStringSlice(dockerKey("ExtraHosts")),
		NetworkMode: networkMode,
		IpcMode:     viper.GetString(dockerKey("IpcMode")),
		PidMode:     viper.GetString(dockerKey("PidMode")),
		UTSMode:     viper.GetString(dockerKey("UTSMode")),
		LogConfig:   logConfig,

		ReadonlyRootfs:   viper.GetBool(dockerKey("ReadonlyRootfs")),
		SecurityOpt:      viper.GetStringSlice(dockerKey("SecurityOpt")),
		CgroupParent:     viper.GetString(dockerKey("CgroupParent")),
		Memory:           getInt64("Memory"),
		MemorySwap:       getInt64("MemorySwap"),
		MemorySwappiness: &memorySwappiness,
		OOMKillDisable:   &oomKillDisable,
		CPUShares:        getInt64("CpuShares"),
		CPUSet:           viper.GetString(dockerKey("Cpuset")),
		CPUSetCPUs:       viper.GetString(dockerKey("CpusetCPUs")),
		CPUSetMEMs:       viper.GetString(dockerKey("CpusetMEMs")),
		CPUQuota:         getInt64("CpuQuota"),
		CPUPeriod:        getInt64("CpuPeriod"),
		BlkioWeight:      getInt64("BlkioWeight"),
	}
}

//go:generate counterfeiter -o mock/get_ledger.go -fake-name GetLedger . getLedger
//go:generate counterfeiter -o mock/peer_ledger.go -fake-name PeerLedger . peerLedger

type peerLedger interface {
	ledger.PeerLedger
}

type getLedger func(string) ledger.PeerLedger

func resetLoop(
	resetFilter *reset,
	preResetHeights map[string]uint64,
	ledgerIDs []string,
	pLedger getLedger,
	interval time.Duration,
) {
	// periodically check to see if current ledger height(s) surpass prereset height(s)
	ticker := time.NewTicker(interval)

	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			logger.Info("Ledger rebuild: Checking if current ledger heights surpass prereset ledger heights")
			logger.Debugf("Ledger rebuild: Number of ledgers still rebuilding before check: %d", len(preResetHeights))
			for cid, height := range preResetHeights {
				var l peerLedger
				l = pLedger(cid)
				if l == nil {
					logger.Warningf("No ledger found for channel [%s]", cid)
					continue
				}
				bcInfo, err := l.GetBlockchainInfo()
				if bcInfo != nil {
					logger.Debugf("Ledger rebuild: channel [%s]: currentHeight [%d] : preresetHeight [%d]", cid, bcInfo.GetHeight(), height)
					if bcInfo.GetHeight() >= height {
						delete(preResetHeights, cid)
					} else {
						break
					}
				} else {
					if err != nil {
						logger.Warningf("Ledger rebuild: could not retrieve info for channel [%s]: %s", cid, err.Error())
					}
				}
			}

			logger.Debugf("Ledger rebuild: Number of ledgers still rebuilding after check: %d", len(preResetHeights))
			if len(preResetHeights) == 0 {
				logger.Infof("Ledger rebuild: Complete, all ledgers surpass prereset heights. Endorsement request processing will be enabled.")
				rootFSPath := filepath.Join(coreconfig.GetPath("peer.fileSystemPath"), "ledgersData")
				err := kvledger.ClearPreResetHeight(rootFSPath, ledgerIDs)
				if err != nil {
					logger.Warningf("Ledger rebuild: could not clear off prereset files: error=%s", err)
				}
				resetFilter.setReject(false)
				return
			}
		}
	}
}

// reset implements the auth.Filter interface
type reset struct {
	sync.RWMutex
	next   pb.EndorserServer
	reject bool
}

func (r *reset) setReject(reject bool) {
	r.Lock()
	defer r.Unlock()
	r.reject = reject
}

// Init initializes Reset with the next EndorserServer
func (r *reset) Init(next pb.EndorserServer) {
	r.next = next
}

// ProcessProposal processes a signed proposal
func (r *reset) ProcessProposal(ctx context.Context, signedProp *pb.SignedProposal) (*pb.ProposalResponse, error) {
	r.RLock()
	defer r.RUnlock()
	if r.reject {
		return nil, errors.New("endorse requests are blocked while ledgers are being rebuilt")
	}
	return r.next.ProcessProposal(ctx, signedProp)
}
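
// For reference, serve() wires the reset filter in front of the endorser
// roughly as follows (names as used above):
//
//	resetFilter := &reset{reject: true}
//	authFilters = append(authFilters, resetFilter)
//	go resetLoop(resetFilter, preResetHeights, ledgerIDs, peerInstance.GetLedger, 10*time.Second)
//	auth := authHandler.ChainFilters(serverEndorser, authFilters...)
//	pb.RegisterEndorserServer(peerServer.Server(), auth)
//
// resetLoop flips reject back to false once every ledger has caught up with
// its pre-reset height.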