github.com/darrenli6/fabric-sdk-example@v0.0.0-20220109053535-94b13b56df8c/orderer/localconfig/config.go (about)

     1  /*
     2  Copyright IBM Corp. 2016 All Rights Reserved.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8                   http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package config
    18  
    19  import (
    20  	"strings"
    21  	"time"
    22  
    23  	"github.com/hyperledger/fabric/common/flogging"
    24  	"github.com/hyperledger/fabric/common/viperutil"
    25  
    26  	"github.com/Shopify/sarama"
    27  	"github.com/op/go-logging"
    28  	"github.com/spf13/viper"
    29  
    30  	cf "github.com/hyperledger/fabric/core/config"
    31  
    32  	"path/filepath"
    33  
    34  	bccsp "github.com/hyperledger/fabric/bccsp/factory"
    35  )
    36  
const (
	// pkgLogID is the module name under which this package's logger is registered.
	pkgLogID = "orderer/localconfig"

	// Prefix identifies the prefix for the orderer-related ENV vars.
	Prefix = "ORDERER"
)
    43  
var (
	// logger is the package-level logger; set up in init().
	logger *logging.Logger

	// configName is the base name of the config file to search for
	// (the lowercased ENV prefix, i.e. "orderer"); set up in init().
	configName string
)
    49  
    50  func init() {
    51  	logger = flogging.MustGetLogger(pkgLogID)
    52  	flogging.SetModuleLevel(pkgLogID, "error")
    53  
    54  	configName = strings.ToLower(Prefix)
    55  }
    56  
// TopLevel directly corresponds to the orderer config YAML.
// Note, for non 1-1 mappings, you may append
// something like `mapstructure:"weirdFoRMat"` to
// modify the default mapping, see the "Unmarshal"
// section of https://github.com/spf13/viper for more info
type TopLevel struct {
	General    General    // settings common to every orderer type
	FileLedger FileLedger // file-based ledger settings
	RAMLedger  RAMLedger  // in-memory ledger settings
	Kafka      Kafka      // Kafka-based orderer settings
}
    68  
// General contains config which should be common among all orderer types.
type General struct {
	LedgerType     string             // ledger backend selector (defaults to "file")
	ListenAddress  string             // address the orderer listens on (defaults to 127.0.0.1)
	ListenPort     uint16             // port the orderer listens on (defaults to 7050)
	TLS            TLS                // TLS settings for the orderer's own endpoint
	GenesisMethod  string             // genesis block provisioning method (defaults to "provisional")
	GenesisProfile string             // genesis profile name (defaults to "SampleSingleMSPSolo")
	GenesisFile    string             // genesis block path; made absolute relative to the config dir
	Profile        Profile            // Go pprof profiling settings
	LogLevel       string             // logging verbosity (defaults to "INFO")
	LogFormat      string             // go-logging format specification string
	LocalMSPDir    string             // local MSP path; made absolute relative to the config dir
	LocalMSPID     string             // ID of the local MSP (defaults to "DEFAULT")
	BCCSP          *bccsp.FactoryOpts // crypto provider (BCCSP) factory options
}
    85  
// TLS contains config for TLS connections.
type TLS struct {
	Enabled           bool     // whether TLS is turned on
	PrivateKey        string   // path to the private key file
	Certificate       string   // path to the certificate file
	RootCAs           []string // paths to additional root CA certificates
	ClientAuthEnabled bool     // whether client certificates are required
	ClientRootCAs     []string // paths to CA certificates trusted for client authentication
}
    95  
// Profile contains configuration for Go pprof profiling.
type Profile struct {
	Enabled bool   // whether the profiling endpoint is served
	Address string // host:port to serve pprof on (defaults to 0.0.0.0:6060)
}
   101  
// FileLedger contains configuration for the file-based ledger.
type FileLedger struct {
	Location string // directory in which ledger data is stored
	Prefix   string // prefix for ledger directory names
}
   107  
// RAMLedger contains configuration for the RAM ledger.
type RAMLedger struct {
	HistorySize uint // number of blocks kept in memory (defaults to 10000)
}
   112  
// Kafka contains configuration for the Kafka-based orderer.
type Kafka struct {
	Retry   Retry               // retry/timeout settings for cluster interactions
	Verbose bool                // whether sarama client logging is enabled
	Version sarama.KafkaVersion // Kafka protocol version to assume // TODO Move this to global config
	TLS     TLS                 // TLS settings for connections to the Kafka cluster
}
   120  
// Retry contains configuration related to retries and timeouts when the
// connection to the Kafka cluster cannot be established, or when Metadata
// requests needs to be repeated (because the cluster is in the middle of a
// leader election).
type Retry struct {
	ShortInterval   time.Duration   // pause between retries during the short phase
	ShortTotal      time.Duration   // total time spent in the short retry phase
	LongInterval    time.Duration   // pause between retries during the long phase
	LongTotal       time.Duration   // total time spent in the long retry phase
	NetworkTimeouts NetworkTimeouts // socket-level timeouts
	Metadata        Metadata        // retry settings for metadata requests
	Producer        Producer        // retry settings for the producer
	Consumer        Consumer        // retry settings for the consumer
}
   135  
// NetworkTimeouts contains the socket timeouts for network requests to the
// Kafka cluster.
type NetworkTimeouts struct {
	DialTimeout  time.Duration // timeout for establishing a connection
	ReadTimeout  time.Duration // timeout for socket reads
	WriteTimeout time.Duration // timeout for socket writes
}
   143  
// Metadata contains configuration for the metadata requests to the Kafka
// cluster.
type Metadata struct {
	RetryMax     int           // maximum number of retry attempts
	RetryBackoff time.Duration // pause between retry attempts
}
   150  
// Producer contains configuration for the producer's retries when failing to
// post a message to a Kafka partition.
type Producer struct {
	RetryMax     int           // maximum number of retry attempts
	RetryBackoff time.Duration // pause between retry attempts
}
   157  
// Consumer contains configuration for the consumer's retries when failing to
// read from a Kafka partition.
type Consumer struct {
	RetryBackoff time.Duration // pause between retry attempts
}
   163  
// defaults holds the fall-back values applied by completeInitialization
// for any field left unset in the parsed YAML/environment configuration.
var defaults = TopLevel{
	General: General{
		LedgerType:     "file",
		ListenAddress:  "127.0.0.1",
		ListenPort:     7050,
		GenesisMethod:  "provisional",
		GenesisProfile: "SampleSingleMSPSolo",
		GenesisFile:    "genesisblock",
		Profile: Profile{
			Enabled: false,
			Address: "0.0.0.0:6060",
		},
		LogLevel:    "INFO",
		LogFormat:   "%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}",
		LocalMSPDir: "msp",
		LocalMSPID:  "DEFAULT",
		BCCSP:       bccsp.GetDefaultOpts(),
	},
	RAMLedger: RAMLedger{
		HistorySize: 10000,
	},
	FileLedger: FileLedger{
		Location: "/var/hyperledger/production/orderer",
		Prefix:   "hyperledger-fabric-ordererledger",
	},
	Kafka: Kafka{
		Retry: Retry{
			ShortInterval: 1 * time.Minute,
			ShortTotal:    10 * time.Minute,
			LongInterval:  10 * time.Minute,
			LongTotal:     12 * time.Hour,
			NetworkTimeouts: NetworkTimeouts{
				DialTimeout:  30 * time.Second,
				ReadTimeout:  30 * time.Second,
				WriteTimeout: 30 * time.Second,
			},
			Metadata: Metadata{
				RetryBackoff: 250 * time.Millisecond,
				RetryMax:     3,
			},
			Producer: Producer{
				RetryBackoff: 100 * time.Millisecond,
				RetryMax:     3,
			},
			Consumer: Consumer{
				RetryBackoff: 2 * time.Second,
			},
		},
		Verbose: false,
		Version: sarama.V0_9_0_1,
		TLS: TLS{
			Enabled: false,
		},
	},
}
   219  
   220  // Load parses the orderer.yaml file and environment, producing a struct suitable for config use
   221  func Load() *TopLevel {
   222  	config := viper.New()
   223  	cf.InitViper(config, configName)
   224  
   225  	// for environment variables
   226  	config.SetEnvPrefix(Prefix)
   227  	config.AutomaticEnv()
   228  	replacer := strings.NewReplacer(".", "_")
   229  	config.SetEnvKeyReplacer(replacer)
   230  
   231  	err := config.ReadInConfig()
   232  	if err != nil {
   233  		logger.Panic("Error reading configuration:", err)
   234  	}
   235  
   236  	var uconf TopLevel
   237  	err = viperutil.EnhancedExactUnmarshal(config, &uconf)
   238  	if err != nil {
   239  		logger.Panic("Error unmarshaling config into struct:", err)
   240  	}
   241  
   242  	uconf.completeInitialization(filepath.Dir(config.ConfigFileUsed()))
   243  
   244  	return &uconf
   245  }
   246  
   247  func (c *TopLevel) completeInitialization(configDir string) {
   248  	defer func() {
   249  		// Translate any paths
   250  		c.General.TLS.RootCAs = translateCAs(configDir, c.General.TLS.RootCAs)
   251  		c.General.TLS.ClientRootCAs = translateCAs(configDir, c.General.TLS.ClientRootCAs)
   252  		cf.TranslatePathInPlace(configDir, &c.General.TLS.PrivateKey)
   253  		cf.TranslatePathInPlace(configDir, &c.General.TLS.Certificate)
   254  		cf.TranslatePathInPlace(configDir, &c.General.GenesisFile)
   255  		cf.TranslatePathInPlace(configDir, &c.General.LocalMSPDir)
   256  	}()
   257  
   258  	for {
   259  		switch {
   260  		case c.General.LedgerType == "":
   261  			logger.Infof("General.LedgerType unset, setting to %s", defaults.General.LedgerType)
   262  			c.General.LedgerType = defaults.General.LedgerType
   263  
   264  		case c.General.ListenAddress == "":
   265  			logger.Infof("General.ListenAddress unset, setting to %s", defaults.General.ListenAddress)
   266  			c.General.ListenAddress = defaults.General.ListenAddress
   267  		case c.General.ListenPort == 0:
   268  			logger.Infof("General.ListenPort unset, setting to %s", defaults.General.ListenPort)
   269  			c.General.ListenPort = defaults.General.ListenPort
   270  
   271  		case c.General.LogLevel == "":
   272  			logger.Infof("General.LogLevel unset, setting to %s", defaults.General.LogLevel)
   273  			c.General.LogLevel = defaults.General.LogLevel
   274  		case c.General.LogFormat == "":
   275  			logger.Infof("General.LogFormat unset, setting to %s", defaults.General.LogFormat)
   276  			c.General.LogFormat = defaults.General.LogFormat
   277  
   278  		case c.General.GenesisMethod == "":
   279  			c.General.GenesisMethod = defaults.General.GenesisMethod
   280  		case c.General.GenesisFile == "":
   281  			c.General.GenesisFile = defaults.General.GenesisFile
   282  		case c.General.GenesisProfile == "":
   283  			c.General.GenesisProfile = defaults.General.GenesisProfile
   284  
   285  		case c.Kafka.TLS.Enabled && c.Kafka.TLS.Certificate == "":
   286  			logger.Panicf("General.Kafka.TLS.Certificate must be set if General.Kafka.TLS.Enabled is set to true.")
   287  		case c.Kafka.TLS.Enabled && c.Kafka.TLS.PrivateKey == "":
   288  			logger.Panicf("General.Kafka.TLS.PrivateKey must be set if General.Kafka.TLS.Enabled is set to true.")
   289  		case c.Kafka.TLS.Enabled && c.Kafka.TLS.RootCAs == nil:
   290  			logger.Panicf("General.Kafka.TLS.CertificatePool must be set if General.Kafka.TLS.Enabled is set to true.")
   291  
   292  		case c.General.Profile.Enabled && c.General.Profile.Address == "":
   293  			logger.Infof("Profiling enabled and General.Profile.Address unset, setting to %s", defaults.General.Profile.Address)
   294  			c.General.Profile.Address = defaults.General.Profile.Address
   295  
   296  		case c.General.LocalMSPDir == "":
   297  			logger.Infof("General.LocalMSPDir unset, setting to %s", defaults.General.LocalMSPDir)
   298  			c.General.LocalMSPDir = defaults.General.LocalMSPDir
   299  		case c.General.LocalMSPID == "":
   300  			logger.Infof("General.LocalMSPID unset, setting to %s", defaults.General.LocalMSPID)
   301  			c.General.LocalMSPID = defaults.General.LocalMSPID
   302  
   303  		case c.FileLedger.Prefix == "":
   304  			logger.Infof("FileLedger.Prefix unset, setting to %s", defaults.FileLedger.Prefix)
   305  			c.FileLedger.Prefix = defaults.FileLedger.Prefix
   306  
   307  		case c.Kafka.Retry.ShortInterval == 0*time.Minute:
   308  			logger.Infof("Kafka.Retry.ShortInterval unset, setting to %v", defaults.Kafka.Retry.ShortInterval)
   309  			c.Kafka.Retry.ShortInterval = defaults.Kafka.Retry.ShortInterval
   310  		case c.Kafka.Retry.ShortTotal == 0*time.Minute:
   311  			logger.Infof("Kafka.Retry.ShortTotal unset, setting to %v", defaults.Kafka.Retry.ShortTotal)
   312  			c.Kafka.Retry.ShortTotal = defaults.Kafka.Retry.ShortTotal
   313  		case c.Kafka.Retry.LongInterval == 0*time.Minute:
   314  			logger.Infof("Kafka.Retry.LongInterval unset, setting to %v", defaults.Kafka.Retry.LongInterval)
   315  			c.Kafka.Retry.LongInterval = defaults.Kafka.Retry.LongInterval
   316  		case c.Kafka.Retry.LongTotal == 0*time.Minute:
   317  			logger.Infof("Kafka.Retry.LongTotal unset, setting to %v", defaults.Kafka.Retry.LongTotal)
   318  			c.Kafka.Retry.LongTotal = defaults.Kafka.Retry.LongTotal
   319  
   320  		case c.Kafka.Retry.NetworkTimeouts.DialTimeout == 0*time.Second:
   321  			logger.Infof("Kafka.Retry.NetworkTimeouts.DialTimeout unset, setting to %v", defaults.Kafka.Retry.NetworkTimeouts.DialTimeout)
   322  			c.Kafka.Retry.NetworkTimeouts.DialTimeout = defaults.Kafka.Retry.NetworkTimeouts.DialTimeout
   323  		case c.Kafka.Retry.NetworkTimeouts.ReadTimeout == 0*time.Second:
   324  			logger.Infof("Kafka.Retry.NetworkTimeouts.ReadTimeout unset, setting to %v", defaults.Kafka.Retry.NetworkTimeouts.ReadTimeout)
   325  			c.Kafka.Retry.NetworkTimeouts.ReadTimeout = defaults.Kafka.Retry.NetworkTimeouts.ReadTimeout
   326  		case c.Kafka.Retry.NetworkTimeouts.WriteTimeout == 0*time.Second:
   327  			logger.Infof("Kafka.Retry.NetworkTimeouts.WriteTimeout unset, setting to %v", defaults.Kafka.Retry.NetworkTimeouts.WriteTimeout)
   328  			c.Kafka.Retry.NetworkTimeouts.WriteTimeout = defaults.Kafka.Retry.NetworkTimeouts.WriteTimeout
   329  
   330  		case c.Kafka.Retry.Metadata.RetryBackoff == 0*time.Second:
   331  			logger.Infof("Kafka.Retry.Metadata.RetryBackoff unset, setting to %v", defaults.Kafka.Retry.Metadata.RetryBackoff)
   332  			c.Kafka.Retry.Metadata.RetryBackoff = defaults.Kafka.Retry.Metadata.RetryBackoff
   333  		case c.Kafka.Retry.Metadata.RetryMax == 0:
   334  			logger.Infof("Kafka.Retry.Metadata.RetryMax unset, setting to %v", defaults.Kafka.Retry.Metadata.RetryMax)
   335  			c.Kafka.Retry.Metadata.RetryMax = defaults.Kafka.Retry.Metadata.RetryMax
   336  
   337  		case c.Kafka.Retry.Producer.RetryBackoff == 0*time.Second:
   338  			logger.Infof("Kafka.Retry.Producer.RetryBackoff unset, setting to %v", defaults.Kafka.Retry.Producer.RetryBackoff)
   339  			c.Kafka.Retry.Producer.RetryBackoff = defaults.Kafka.Retry.Producer.RetryBackoff
   340  		case c.Kafka.Retry.Producer.RetryMax == 0:
   341  			logger.Infof("Kafka.Retry.Producer.RetryMax unset, setting to %v", defaults.Kafka.Retry.Producer.RetryMax)
   342  			c.Kafka.Retry.Producer.RetryMax = defaults.Kafka.Retry.Producer.RetryMax
   343  
   344  		case c.Kafka.Retry.Consumer.RetryBackoff == 0*time.Second:
   345  			logger.Infof("Kafka.Retry.Consumer.RetryBackoff unset, setting to %v", defaults.Kafka.Retry.Consumer.RetryBackoff)
   346  			c.Kafka.Retry.Consumer.RetryBackoff = defaults.Kafka.Retry.Consumer.RetryBackoff
   347  
   348  		case c.Kafka.Version == sarama.KafkaVersion{}:
   349  			logger.Infof("Kafka.Version unset, setting to %v", defaults.Kafka.Version)
   350  			c.Kafka.Version = defaults.Kafka.Version
   351  
   352  		default:
   353  			return
   354  		}
   355  	}
   356  }
   357  
   358  func translateCAs(configDir string, certificateAuthorities []string) []string {
   359  	var results []string
   360  	for _, ca := range certificateAuthorities {
   361  		result := cf.TranslatePath(configDir, ca)
   362  		results = append(results, result)
   363  	}
   364  	return results
   365  }