github.com/sykesm/fabric@v1.1.0-preview.0.20200129034918-2aa12b1a0181/orderer/consensus/etcdraft/consenter.go (about)

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package etcdraft

import (
	"bytes"
	"path"
	"reflect"
	"time"

	"code.cloudfoundry.org/clock"
	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/orderer"
	"github.com/hyperledger/fabric-protos-go/orderer/etcdraft"
	"github.com/hyperledger/fabric/bccsp"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/metrics"
	"github.com/hyperledger/fabric/core/comm"
	"github.com/hyperledger/fabric/orderer/common/cluster"
	"github.com/hyperledger/fabric/orderer/common/localconfig"
	"github.com/hyperledger/fabric/orderer/common/multichannel"
	"github.com/hyperledger/fabric/orderer/consensus"
	"github.com/hyperledger/fabric/orderer/consensus/inactive"
	"github.com/mitchellh/mapstructure"
	"github.com/pkg/errors"
	"go.etcd.io/etcd/raft"
)

// CreateChainCallback creates a new chain
type CreateChainCallback func()

//go:generate mockery -dir . -name InactiveChainRegistry -case underscore -output mocks

// InactiveChainRegistry registers chains that are inactive
type InactiveChainRegistry interface {
	// TrackChain tracks a chain with the given name, and calls the given callback
	// when this chain should be created.
	TrackChain(chainName string, genesisBlock *common.Block, createChain CreateChainCallback)
}

//go:generate mockery -dir . -name ChainGetter -case underscore -output mocks

// ChainGetter obtains instances of ChainSupport for the given channel
type ChainGetter interface {
	// GetChain obtains the ChainSupport for the given channel.
	// Returns nil when the ChainSupport for the given channel
	// isn't found.
	GetChain(chainID string) *multichannel.ChainSupport
}

// Config contains etcdraft configurations
type Config struct {
	WALDir            string // WAL data of <my-channel> is stored in WALDir/<my-channel>
	SnapDir           string // Snapshots of <my-channel> are stored in SnapDir/<my-channel>
	EvictionSuspicion string // Duration threshold that the node samples in order to suspect its eviction from the channel.
}

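// As an illustration, the Consensus section of orderer.yaml that New decodes
// into this struct might look like the following (hypothetical paths; only
// keys matching the fields above are picked up by mapstructure):
//
//	Consensus:
//	    WALDir: /var/hyperledger/production/orderer/etcdraft/wal
//	    SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot
//	    EvictionSuspicion: 10m
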
// Consenter implements the etcdraft consenter
type Consenter struct {
	CreateChain           func(chainName string)
	InactiveChainRegistry InactiveChainRegistry
	Dialer                *cluster.PredicateDialer
	Communication         cluster.Communicator
	*Dispatcher
	Chains         ChainGetter
	Logger         *flogging.FabricLogger
	EtcdRaftConfig Config
	OrdererConfig  localconfig.TopLevel
	Cert           []byte
	Metrics        *Metrics
	BCCSP          bccsp.BCCSP
}

// TargetChannel extracts the channel from the given proto.Message.
// Returns an empty string on failure.
func (c *Consenter) TargetChannel(message proto.Message) string {
	switch req := message.(type) {
	case *orderer.ConsensusRequest:
		return req.Channel
	case *orderer.SubmitRequest:
		return req.Channel
	default:
		return ""
	}
}

// ReceiverByChain returns the MessageReceiver for the given channelID or nil
// if not found.
func (c *Consenter) ReceiverByChain(channelID string) MessageReceiver {
	cs := c.Chains.GetChain(channelID)
	if cs == nil {
		return nil
	}
	if cs.Chain == nil {
		c.Logger.Panicf("Programming error - Chain %s is nil although it exists in the mapping", channelID)
	}
	if etcdRaftChain, isEtcdRaftChain := cs.Chain.(*Chain); isEtcdRaftChain {
		return etcdRaftChain
	}
	c.Logger.Warningf("Chain %s is of type %v and not etcdraft.Chain", channelID, reflect.TypeOf(cs.Chain))
	return nil
}

func (c *Consenter) detectSelfID(consenters map[uint64]*etcdraft.Consenter) (uint64, error) {
	thisNodeCertAsDER, err := pemToDER(c.Cert, 0, "server", c.Logger)
	if err != nil {
		return 0, err
	}

	var serverCertificates []string
	for nodeID, cst := range consenters {
		serverCertificates = append(serverCertificates, string(cst.ServerTlsCert))

		certAsDER, err := pemToDER(cst.ServerTlsCert, nodeID, "server", c.Logger)
		if err != nil {
			return 0, err
		}

		if bytes.Equal(thisNodeCertAsDER, certAsDER) {
			return nodeID, nil
		}
	}

	c.Logger.Warning("Could not find", string(c.Cert), "among", serverCertificates)
	return 0, cluster.ErrNotInChannel
}

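// As a worked example (hypothetical certificates): given a consenter set
// {1: certA, 2: certB} and c.Cert holding the PEM encoding of certB's
// server TLS certificate, detectSelfID returns 2. If c.Cert matches none
// of the consenters, it returns cluster.ErrNotInChannel, which HandleChain
// below treats as "this node does not service the channel".
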
// HandleChain returns a new Chain instance or an error upon failure
func (c *Consenter) HandleChain(support consensus.ConsenterSupport, metadata *common.Metadata) (consensus.Chain, error) {
	m := &etcdraft.ConfigMetadata{}
	if err := proto.Unmarshal(support.SharedConfig().ConsensusMetadata(), m); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal consensus metadata")
	}

	if m.Options == nil {
		return nil, errors.New("etcdraft options have not been provided")
	}

	isMigration := (metadata == nil || len(metadata.Value) == 0) && (support.Height() > 1)
	if isMigration {
		c.Logger.Debugf("Block metadata is nil at block height=%d, this is a consensus-type migration", support.Height())
	}

	// Determine the Raft replica set by mapping each consenter to its ID.
	// For a newly started chain we need to read and initialize the Raft
	// metadata by creating a mapping between each consenter and its ID.
	// In case the chain has been restarted, we restore the Raft metadata
	// from the metadata field of the most recently committed block.
	blockMetadata, err := ReadBlockMetadata(metadata, m)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to read Raft metadata")
	}

	consenters := CreateConsentersMap(blockMetadata, m)

	id, err := c.detectSelfID(consenters)
	if err != nil {
		c.InactiveChainRegistry.TrackChain(support.ChannelID(), support.Block(0), func() {
			c.CreateChain(support.ChannelID())
		})
		return &inactive.Chain{Err: errors.Errorf("channel %s is not serviced by me", support.ChannelID())}, nil
	}

	var evictionSuspicion time.Duration
	if c.EtcdRaftConfig.EvictionSuspicion == "" {
		c.Logger.Infof("EvictionSuspicion not set, defaulting to %v", DefaultEvictionSuspicion)
		evictionSuspicion = DefaultEvictionSuspicion
	} else {
		evictionSuspicion, err = time.ParseDuration(c.EtcdRaftConfig.EvictionSuspicion)
		if err != nil {
			c.Logger.Panicf("Failed parsing Consensus.EvictionSuspicion: %s: %v", c.EtcdRaftConfig.EvictionSuspicion, err)
		}
	}

	tickInterval, err := time.ParseDuration(m.Options.TickInterval)
	if err != nil {
		return nil, errors.Errorf("failed to parse TickInterval (%s) to time duration", m.Options.TickInterval)
	}

	opts := Options{
		RaftID:        id,
		Clock:         clock.NewClock(),
		MemoryStorage: raft.NewMemoryStorage(),
		Logger:        c.Logger,

		TickInterval:         tickInterval,
		ElectionTick:         int(m.Options.ElectionTick),
		HeartbeatTick:        int(m.Options.HeartbeatTick),
		MaxInflightBlocks:    int(m.Options.MaxInflightBlocks),
		MaxSizePerMsg:        uint64(support.SharedConfig().BatchSize().PreferredMaxBytes),
		SnapshotIntervalSize: m.Options.SnapshotIntervalSize,

		BlockMetadata: blockMetadata,
		Consenters:    consenters,

		MigrationInit: isMigration,

		WALDir:            path.Join(c.EtcdRaftConfig.WALDir, support.ChannelID()),
		SnapDir:           path.Join(c.EtcdRaftConfig.SnapDir, support.ChannelID()),
		EvictionSuspicion: evictionSuspicion,
		Cert:              c.Cert,
		Metrics:           c.Metrics,
	}

	rpc := &cluster.RPC{
		Timeout:       c.OrdererConfig.General.Cluster.RPCTimeout,
		Logger:        c.Logger,
		Channel:       support.ChannelID(),
		Comm:          c.Communication,
		StreamsByType: cluster.NewStreamsByType(),
	}
	return NewChain(
		support,
		opts,
		c.Communication,
		rpc,
		c.BCCSP,
		func() (BlockPuller, error) {
			return NewBlockPuller(support, c.Dialer, c.OrdererConfig.General.Cluster, c.BCCSP)
		},
		func() {
			c.InactiveChainRegistry.TrackChain(support.ChannelID(), nil, func() { c.CreateChain(support.ChannelID()) })
		},
		nil,
	)
}

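// For reference, the m.Options consumed by HandleChain above originate from
// the channel configuration, typically authored in configtx.yaml. A sketch
// with illustrative values (matching the sample defaults shipped with Fabric):
//
//	EtcdRaft:
//	    Options:
//	        TickInterval: 500ms
//	        ElectionTick: 10
//	        HeartbeatTick: 1
//	        MaxInflightBlocks: 5
//	        SnapshotIntervalSize: 16 MB
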
// ReadBlockMetadata attempts to read Raft metadata from the block metadata,
// if available; otherwise, it constructs Raft metadata from the supplied
// config metadata.
func ReadBlockMetadata(blockMetadata *common.Metadata, configMetadata *etcdraft.ConfigMetadata) (*etcdraft.BlockMetadata, error) {
	if blockMetadata != nil && len(blockMetadata.Value) != 0 { // we have consenters mapping from block
		m := &etcdraft.BlockMetadata{}
		if err := proto.Unmarshal(blockMetadata.Value, m); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal block's metadata")
		}
		return m, nil
	}

	m := &etcdraft.BlockMetadata{
		NextConsenterId: 1,
		ConsenterIds:    make([]uint64, len(configMetadata.Consenters)),
	}
	// assign sequential IDs to the consenters read from the configuration
	for i := range m.ConsenterIds {
		m.ConsenterIds[i] = m.NextConsenterId
		m.NextConsenterId++
	}

	return m, nil
}

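// For example, with three consenters in the config metadata and no block
// metadata to restore from, ReadBlockMetadata above yields
// ConsenterIds = [1, 2, 3] and NextConsenterId = 4.
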
// New creates an etcdraft Consenter
func New(
	clusterDialer *cluster.PredicateDialer,
	conf *localconfig.TopLevel,
	srvConf comm.ServerConfig,
	srv *comm.GRPCServer,
	r *multichannel.Registrar,
	icr InactiveChainRegistry,
	metricsProvider metrics.Provider,
	bccsp bccsp.BCCSP,
) *Consenter {
	logger := flogging.MustGetLogger("orderer.consensus.etcdraft")

	var cfg Config
	err := mapstructure.Decode(conf.Consensus, &cfg)
	if err != nil {
		logger.Panicf("Failed to decode etcdraft configuration: %s", err)
	}

	consenter := &Consenter{
		CreateChain:           r.CreateChain,
		Cert:                  srvConf.SecOpts.Certificate,
		Logger:                logger,
		Chains:                r,
		EtcdRaftConfig:        cfg,
		OrdererConfig:         *conf,
		Dialer:                clusterDialer,
		Metrics:               NewMetrics(metricsProvider),
		InactiveChainRegistry: icr,
		BCCSP:                 bccsp,
	}
	consenter.Dispatcher = &Dispatcher{
		Logger:        logger,
		ChainSelector: consenter,
	}

	comm := createComm(clusterDialer, consenter, conf.General.Cluster, metricsProvider)
	consenter.Communication = comm
	svc := &cluster.Service{
		CertExpWarningThreshold:          conf.General.Cluster.CertExpirationWarningThreshold,
		MinimumExpirationWarningInterval: cluster.MinimumExpirationWarningInterval,
		StreamCountReporter: &cluster.StreamCountReporter{
			Metrics: comm.Metrics,
		},
		StepLogger: flogging.MustGetLogger("orderer.common.cluster.step"),
		Logger:     flogging.MustGetLogger("orderer.common.cluster"),
		Dispatcher: comm,
	}
	orderer.RegisterClusterServer(srv.Server(), svc)
	return consenter
}

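// A hypothetical wiring from the orderer startup code (names outside this
// package are assumed for illustration):
//
//	consenter := etcdraft.New(dialer, conf, srvConf, grpcServer, registrar, icr, metricsProvider, bccsp)
//	registrar.Initialize(map[string]consensus.Consenter{"etcdraft": consenter})
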
func createComm(clusterDialer *cluster.PredicateDialer, c *Consenter, config localconfig.Cluster, p metrics.Provider) *cluster.Comm {
	metrics := cluster.NewMetrics(p)
	comm := &cluster.Comm{
		MinimumExpirationWarningInterval: cluster.MinimumExpirationWarningInterval,
		CertExpWarningThreshold:          config.CertExpirationWarningThreshold,
		SendBufferSize:                   config.SendBufferSize,
		Logger:                           flogging.MustGetLogger("orderer.common.cluster"),
		Chan2Members:                     make(map[string]cluster.MemberMapping),
		Connections:                      cluster.NewConnectionStore(clusterDialer, metrics.EgressTLSConnectionCount),
		Metrics:                          metrics,
		ChanExt:                          c,
		H:                                c,
	}
	c.Communication = comm
	return comm
}