github.com/true-sqn/fabric@v2.1.1+incompatible/orderer/consensus/etcdraft/consenter.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package etcdraft

import (
	"bytes"
	"path"
	"reflect"
	"time"

	"code.cloudfoundry.org/clock"
	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/orderer"
	"github.com/hyperledger/fabric-protos-go/orderer/etcdraft"
	"github.com/hyperledger/fabric/bccsp"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/metrics"
	"github.com/hyperledger/fabric/internal/pkg/comm"
	"github.com/hyperledger/fabric/orderer/common/cluster"
	"github.com/hyperledger/fabric/orderer/common/localconfig"
	"github.com/hyperledger/fabric/orderer/common/multichannel"
	"github.com/hyperledger/fabric/orderer/consensus"
	"github.com/hyperledger/fabric/orderer/consensus/inactive"
	"github.com/mitchellh/mapstructure"
	"github.com/pkg/errors"
	"go.etcd.io/etcd/raft"
)

//go:generate mockery -dir . -name InactiveChainRegistry -case underscore -output mocks

// InactiveChainRegistry registers chains that are inactive
type InactiveChainRegistry interface {
	// TrackChain tracks a chain with the given name, and calls the given callback
	// when this chain should be created.
	TrackChain(chainName string, genesisBlock *common.Block, createChain func())
}
//go:generate mockery -dir . -name ChainGetter -case underscore -output mocks

// ChainGetter obtains instances of ChainSupport for the given channel
type ChainGetter interface {
	// GetChain obtains the ChainSupport for the given channel.
	// Returns nil when the ChainSupport for the given channel
	// isn't found.
	GetChain(chainID string) *multichannel.ChainSupport
}
// Config contains etcdraft configurations
type Config struct {
	WALDir            string // WAL data of <my-channel> is stored in WALDir/<my-channel>
	SnapDir           string // Snapshots of <my-channel> are stored in SnapDir/<my-channel>
	EvictionSuspicion string // Duration threshold that the node samples in order to suspect its eviction from the channel.
}
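
// exampleEvictionSuspicion is an illustrative sketch (a hypothetical helper,
// not wired into the consenter): it mirrors how HandleChain below turns the
// EvictionSuspicion string into a time.Duration, falling back to the package
// default when the field is unset (but returning, rather than panicking on,
// a malformed duration).
func exampleEvictionSuspicion(cfg Config) (time.Duration, error) {
	if cfg.EvictionSuspicion == "" {
		return DefaultEvictionSuspicion, nil // e.g. cfg decoded from orderer.yaml without the key
	}
	return time.ParseDuration(cfg.EvictionSuspicion) // e.g. "10m" -> 10 * time.Minute
}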

// Consenter implements etcdraft consenter
type Consenter struct {
	CreateChain           func(chainName string)
	InactiveChainRegistry InactiveChainRegistry
	Dialer                *cluster.PredicateDialer
	Communication         cluster.Communicator
	*Dispatcher
	Chains         ChainGetter
	Logger         *flogging.FabricLogger
	EtcdRaftConfig Config
	OrdererConfig  localconfig.TopLevel
	Cert           []byte
	Metrics        *Metrics
	BCCSP          bccsp.BCCSP
}

// TargetChannel extracts the channel from the given proto.Message.
// Returns an empty string on failure.
func (c *Consenter) TargetChannel(message proto.Message) string {
	switch req := message.(type) {
	case *orderer.ConsensusRequest:
		return req.Channel
	case *orderer.SubmitRequest:
		return req.Channel
	default:
		return ""
	}
}
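
// exampleTargetChannel is an illustrative sketch (hypothetical, not called by
// production code): TargetChannel lets the cluster layer learn which channel
// an incoming request belongs to, so the Dispatcher can route it.
func exampleTargetChannel(c *Consenter) string {
	req := &orderer.SubmitRequest{Channel: "mychannel"}
	return c.TargetChannel(req) // "mychannel"; a non-cluster message would yield ""
}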

// ReceiverByChain returns the MessageReceiver for the given channelID or nil
// if not found.
func (c *Consenter) ReceiverByChain(channelID string) MessageReceiver {
	cs := c.Chains.GetChain(channelID)
	if cs == nil {
		return nil
	}
	if cs.Chain == nil {
		c.Logger.Panicf("Programming error - Chain %s is nil although it exists in the mapping", channelID)
	}
	if etcdRaftChain, isEtcdRaftChain := cs.Chain.(*Chain); isEtcdRaftChain {
		return etcdRaftChain
	}
	c.Logger.Warningf("Chain %s is of type %v and not etcdraft.Chain", channelID, reflect.TypeOf(cs.Chain))
	return nil
}
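
// exampleReceiverLookup is an illustrative sketch (hypothetical): the embedded
// Dispatcher calls ReceiverByChain with the channel name extracted by
// TargetChannel; a nil receiver means the chain is unknown, or is not an
// etcdraft chain, and the message cannot be dispatched.
func exampleReceiverLookup(c *Consenter, channelID string) bool {
	return c.ReceiverByChain(channelID) != nil
}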

func (c *Consenter) detectSelfID(consenters map[uint64]*etcdraft.Consenter) (uint64, error) {
	thisNodeCertAsDER, err := pemToDER(c.Cert, 0, "server", c.Logger)
	if err != nil {
		return 0, err
	}

	var serverCertificates []string
	for nodeID, cst := range consenters {
		serverCertificates = append(serverCertificates, string(cst.ServerTlsCert))

		certAsDER, err := pemToDER(cst.ServerTlsCert, nodeID, "server", c.Logger)
		if err != nil {
			return 0, err
		}

		if bytes.Equal(thisNodeCertAsDER, certAsDER) {
			return nodeID, nil
		}
	}

	c.Logger.Warning("Could not find", string(c.Cert), "among", serverCertificates)
	return 0, cluster.ErrNotInChannel
}
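
// exampleSelfDetection is an illustrative sketch (hypothetical): detectSelfID
// answers "which Raft ID is mine?" by comparing the DER bytes of this node's
// TLS server certificate against every consenter's ServerTlsCert; an error
// (cluster.ErrNotInChannel) means this orderer is not a member of the channel.
func exampleSelfDetection(c *Consenter, consenters map[uint64]*etcdraft.Consenter) bool {
	_, err := c.detectSelfID(consenters)
	return err == nil // false when this node's certificate is not in the consenter set
}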

// HandleChain returns a new Chain instance or an error upon failure
func (c *Consenter) HandleChain(support consensus.ConsenterSupport, metadata *common.Metadata) (consensus.Chain, error) {
	m := &etcdraft.ConfigMetadata{}
	if err := proto.Unmarshal(support.SharedConfig().ConsensusMetadata(), m); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal consensus metadata")
	}

	if m.Options == nil {
		return nil, errors.New("etcdraft options have not been provided")
	}

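	// A freshly created chain starts at height 1 (genesis block only); a chain
	// that is past genesis but whose last block carries no Raft metadata must
	// have just been migrated from another consensus type.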
	isMigration := (metadata == nil || len(metadata.Value) == 0) && (support.Height() > 1)
	if isMigration {
		c.Logger.Debugf("Block metadata is nil at block height=%d, this is a consensus-type migration", support.Height())
	}

	// Determine the Raft replica set by mapping each consenter to its ID.
	// For a newly started chain we read and initialize the Raft metadata by
	// creating the mapping between each consenter and its ID. If the chain
	// has been restarted, we restore the Raft metadata from the metadata
	// field of the most recently committed block.
	blockMetadata, err := ReadBlockMetadata(metadata, m)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to read Raft metadata")
	}

	consenters := CreateConsentersMap(blockMetadata, m)

	id, err := c.detectSelfID(consenters)
	if err != nil {
		c.InactiveChainRegistry.TrackChain(support.ChannelID(), support.Block(0), func() {
			c.CreateChain(support.ChannelID())
		})
		return &inactive.Chain{Err: errors.Errorf("channel %s is not serviced by me", support.ChannelID())}, nil
	}

	var evictionSuspicion time.Duration
	if c.EtcdRaftConfig.EvictionSuspicion == "" {
		c.Logger.Infof("EvictionSuspicion not set, defaulting to %v", DefaultEvictionSuspicion)
		evictionSuspicion = DefaultEvictionSuspicion
	} else {
		evictionSuspicion, err = time.ParseDuration(c.EtcdRaftConfig.EvictionSuspicion)
		if err != nil {
			c.Logger.Panicf("Failed parsing Consensus.EvictionSuspicion: %s: %v", c.EtcdRaftConfig.EvictionSuspicion, err)
		}
	}

	tickInterval, err := time.ParseDuration(m.Options.TickInterval)
	if err != nil {
		return nil, errors.Errorf("failed to parse TickInterval (%s) to time duration", m.Options.TickInterval)
	}

	opts := Options{
		RaftID:        id,
		Clock:         clock.NewClock(),
		MemoryStorage: raft.NewMemoryStorage(),
		Logger:        c.Logger,

		TickInterval:         tickInterval,
		ElectionTick:         int(m.Options.ElectionTick),
		HeartbeatTick:        int(m.Options.HeartbeatTick),
		MaxInflightBlocks:    int(m.Options.MaxInflightBlocks),
		MaxSizePerMsg:        uint64(support.SharedConfig().BatchSize().PreferredMaxBytes),
		SnapshotIntervalSize: m.Options.SnapshotIntervalSize,

		BlockMetadata: blockMetadata,
		Consenters:    consenters,

		MigrationInit: isMigration,

		WALDir:            path.Join(c.EtcdRaftConfig.WALDir, support.ChannelID()),
		SnapDir:           path.Join(c.EtcdRaftConfig.SnapDir, support.ChannelID()),
		EvictionSuspicion: evictionSuspicion,
		Cert:              c.Cert,
		Metrics:           c.Metrics,
	}

	rpc := &cluster.RPC{
		Timeout:       c.OrdererConfig.General.Cluster.RPCTimeout,
		Logger:        c.Logger,
		Channel:       support.ChannelID(),
		Comm:          c.Communication,
		StreamsByType: cluster.NewStreamsByType(),
	}
	return NewChain(
		support,
		opts,
		c.Communication,
		rpc,
		c.BCCSP,
		func() (BlockPuller, error) {
			return NewBlockPuller(support, c.Dialer, c.OrdererConfig.General.Cluster, c.BCCSP)
		},
		func() {
			c.InactiveChainRegistry.TrackChain(support.ChannelID(), nil, func() { c.CreateChain(support.ChannelID()) })
		},
		nil,
	)
}
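
// exampleElectionTimeout is an illustrative sketch (hypothetical): in
// etcd/raft the effective election timeout is ElectionTick ticks of
// TickInterval each, e.g. ElectionTick=10 with TickInterval="500ms" gives a
// follower 5s of leader silence before it campaigns for leadership.
func exampleElectionTimeout(tickInterval time.Duration, electionTick int) time.Duration {
	return tickInterval * time.Duration(electionTick)
}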

// ReadBlockMetadata attempts to read Raft metadata from the block metadata, if
// available. Otherwise, it reads Raft metadata from the supplied config metadata.
func ReadBlockMetadata(blockMetadata *common.Metadata, configMetadata *etcdraft.ConfigMetadata) (*etcdraft.BlockMetadata, error) {
	if blockMetadata != nil && len(blockMetadata.Value) != 0 { // we have consenters mapping from block
		m := &etcdraft.BlockMetadata{}
		if err := proto.Unmarshal(blockMetadata.Value, m); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal block's metadata")
		}
		return m, nil
	}

	m := &etcdraft.BlockMetadata{
		NextConsenterId: 1,
		ConsenterIds:    make([]uint64, len(configMetadata.Consenters)),
	}
	// The consenters come from the configuration; assign each a fresh, sequential ID.
	for i := range m.ConsenterIds {
		m.ConsenterIds[i] = m.NextConsenterId
		m.NextConsenterId++
	}

	return m, nil
}
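
// exampleFreshBlockMetadata is an illustrative sketch (hypothetical): for a
// brand-new chain there is no block metadata, so ReadBlockMetadata falls back
// to the config metadata and assigns sequential Raft IDs; with three
// consenters it returns ConsenterIds [1 2 3] and NextConsenterId 4.
func exampleFreshBlockMetadata() (*etcdraft.BlockMetadata, error) {
	config := &etcdraft.ConfigMetadata{
		Consenters: make([]*etcdraft.Consenter, 3),
	}
	return ReadBlockMetadata(nil, config)
}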

// New creates an etcdraft Consenter
func New(
	clusterDialer *cluster.PredicateDialer,
	conf *localconfig.TopLevel,
	srvConf comm.ServerConfig,
	srv *comm.GRPCServer,
	r *multichannel.Registrar,
	icr InactiveChainRegistry,
	metricsProvider metrics.Provider,
	bccsp bccsp.BCCSP,
) *Consenter {
	logger := flogging.MustGetLogger("orderer.consensus.etcdraft")

	var cfg Config
	err := mapstructure.Decode(conf.Consensus, &cfg)
	if err != nil {
		logger.Panicf("Failed to decode etcdraft configuration: %s", err)
	}

	consenter := &Consenter{
		CreateChain:           r.CreateChain,
		Cert:                  srvConf.SecOpts.Certificate,
		Logger:                logger,
		Chains:                r,
		EtcdRaftConfig:        cfg,
		OrdererConfig:         *conf,
		Dialer:                clusterDialer,
		Metrics:               NewMetrics(metricsProvider),
		InactiveChainRegistry: icr,
		BCCSP:                 bccsp,
	}
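	// The consenter is its own chain selector: the embedded Dispatcher routes
	// each incoming cluster message to the chain that ReceiverByChain returns.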
	consenter.Dispatcher = &Dispatcher{
		Logger:        logger,
		ChainSelector: consenter,
	}

	comm := createComm(clusterDialer, consenter, conf.General.Cluster, metricsProvider)
	consenter.Communication = comm
	svc := &cluster.Service{
		CertExpWarningThreshold:          conf.General.Cluster.CertExpirationWarningThreshold,
		MinimumExpirationWarningInterval: cluster.MinimumExpirationWarningInterval,
		StreamCountReporter: &cluster.StreamCountReporter{
			Metrics: comm.Metrics,
		},
		StepLogger: flogging.MustGetLogger("orderer.common.cluster.step"),
		Logger:     flogging.MustGetLogger("orderer.common.cluster"),
		Dispatcher: comm,
	}
	orderer.RegisterClusterServer(srv.Server(), svc)
	return consenter
}

func createComm(clusterDialer *cluster.PredicateDialer, c *Consenter, config localconfig.Cluster, p metrics.Provider) *cluster.Comm {
	metrics := cluster.NewMetrics(p)
	comm := &cluster.Comm{
		MinimumExpirationWarningInterval: cluster.MinimumExpirationWarningInterval,
		CertExpWarningThreshold:          config.CertExpirationWarningThreshold,
		SendBufferSize:                   config.SendBufferSize,
		Logger:                           flogging.MustGetLogger("orderer.common.cluster"),
		Chan2Members:                     make(map[string]cluster.MemberMapping),
		Connections:                      cluster.NewConnectionStore(clusterDialer, metrics.EgressTLSConnectionCount),
		Metrics:                          metrics,
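		// ChanExt: the consenter's TargetChannel extracts the channel name
		// from incoming requests; H: the embedded Dispatcher handles them.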
		ChanExt:                          c,
		H:                                c,
	}
	c.Communication = comm
	return comm
}