# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#

---
################################################################################
#
#   Orderer Configuration
#
#   - This controls the type and configuration of the orderer.
#
################################################################################
General:

    # Ledger Type: The ledger type to provide to the orderer.
    # Two non-production ledger types are provided for test purposes only:
    #  - ram: An in-memory ledger whose contents are lost on restart.
    #  - json: A simple file ledger that writes blocks to disk in JSON format.
    # Only one production ledger type is provided:
    #  - file: A production file-based ledger.
    LedgerType: file

    # Listen address: The IP on which to bind to listen.
    ListenAddress: 0.0.0.0

    # Listen port: The port on which to bind to listen.
    ListenPort: 7050

    # TLS: TLS settings for the GRPC server.
    TLS:
        Enabled: true
        PrivateKey: tls/server.key
        Certificate: tls/server.crt
        RootCAs:
            - tls/ca.crt
        ClientAuthEnabled: false
        ClientRootCAs:
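
        # Illustrative sketch only (commented out; not used by this example):
        # to require mutual TLS from clients connecting to the gRPC listener,
        # ClientAuthEnabled would be set to true and ClientRootCAs populated
        # with the CA certificates that issued the client certificates. The
        # path below is a placeholder, not a file shipped with this example.
        #
        # ClientAuthEnabled: true
        # ClientRootCAs:
        #     - path/to/client-ca.crt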

    # Log Level: The level at which to log. This accepts logging specifications
    # per: fabric/docs/Setup/logging-control.md
    LogLevel: debug

    # Genesis method: The method by which the genesis block for the orderer
    # system channel is specified. Available options are "provisional", "file":
    #  - provisional: Utilizes a genesis profile, specified by GenesisProfile,
    #    to dynamically generate a new genesis block.
    #  - file: Uses the file provided by GenesisFile as the genesis block.
    GenesisMethod: file

    # Genesis profile: The profile to use to dynamically generate the genesis
    # block to use when initializing the orderer system channel and
    # GenesisMethod is set to "provisional". See the configtx.yaml file for the
    # descriptions of the available profiles. Ignored if GenesisMethod is set
    # to "file".
    GenesisProfile: SampleSingleMSPSolo

    # Genesis file: The file containing the genesis block to use when
    # initializing the orderer system channel and GenesisMethod is set to
    # "file". Ignored if GenesisMethod is set to "provisional".
    GenesisFile: genesis.block

    # LocalMSPDir is where to find the private crypto material needed by the
    # orderer. It is set relative here as a default for dev environments but
    # should be changed to the real location in production.
    LocalMSPDir: msp

    # LocalMSPID is the identity to register the local MSP material with the
    # MSP manager. IMPORTANT: The local MSP ID of an orderer needs to match the
    # MSP ID of one of the organizations defined in the orderer system
    # channel's /Channel/Orderer configuration. The sample organization defined
    # in the sample configuration provided has an MSP ID of "DEFAULT".
    LocalMSPID: OrdererMSP

    # Enable an HTTP service for Go "pprof" profiling as documented at:
    # https://golang.org/pkg/net/http/pprof
    Profile:
        Enabled: false
        Address: 0.0.0.0:6060

    # BCCSP configures the blockchain crypto service providers.
    BCCSP:
        # Default specifies the preferred blockchain crypto service provider
        # to use. If the preferred provider is not available, the software
        # based provider ("SW") will be used.
        # Valid providers are:
        #  - SW: a software based crypto provider
        #  - PKCS11: a CA hardware security module crypto provider.
        Default: SW

        # SW configures the software based blockchain crypto provider.
        SW:
            # TODO: The default Hash and Security level needs refactoring to be
            # fully configurable. Changing these defaults requires coordination;
            # SHA2 is hardcoded in several places, not only BCCSP.
            Hash: SHA2
            Security: 256
            # Location of key store. If this is unset, a location will be
            # chosen using: 'LocalMSPDir'/keystore
            FileKeyStore:
                KeyStore:
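
        # Illustrative sketch only (commented out; not used by this example):
        # if the PKCS11 provider mentioned above were selected instead of SW,
        # a block along these lines would be expected. The key names follow
        # Fabric's BCCSP PKCS11 options but should be verified against the
        # Fabric version in use; the library path, pin, and label are
        # placeholders.
        #
        # Default: PKCS11
        # PKCS11:
        #     Library: /usr/lib/softhsm/libsofthsm2.so
        #     Pin: "98765432"
        #     Label: ForFabric
        #     Hash: SHA2
        #     Security: 256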

################################################################################
#
#   SECTION: File Ledger
#
#   - This section applies to the configuration of the file or json ledgers.
#
################################################################################
FileLedger:

    # Location: The directory to store the blocks in.
    # NOTE: If this is unset, a new temporary location will be chosen every
    # time the orderer is restarted, using the prefix specified by Prefix.
    Location: /var/hyperledger/fabric/orderer

    # The prefix to use when generating a ledger directory in temporary space.
    # Otherwise, this value is ignored.
    Prefix: hyperledger-fabric-ordererledger

################################################################################
#
#   SECTION: RAM Ledger
#
#   - This section applies to the configuration of the RAM ledger.
#
################################################################################
RAMLedger:

    # History Size: The number of blocks that the RAM ledger is set to retain.
    # WARNING: Appending a block to the ledger might cause the oldest block in
    # the ledger to be dropped in order to limit the total number of blocks
    # to HistorySize. For example, if the history size is 10, when appending
    # block 10, block 0 (the genesis block!) will be dropped to make room for
    # block 10.
    HistorySize: 1000

################################################################################
#
#   SECTION: Kafka
#
#   - This section applies to the configuration of the Kafka-based orderer, and
#     its interaction with the Kafka cluster.
#
################################################################################
Kafka:

    # Retry: What to do if a connection to the Kafka cluster cannot be
    # established, or if a metadata request to the Kafka cluster needs to be
    # repeated.
    Retry:
        # When a new channel is created, or when an existing channel is
        # reloaded (in case of a just-restarted orderer), the orderer interacts
        # with the Kafka cluster in the following ways:
        # 1. It creates a Kafka producer (writer) for the Kafka partition that
        #    corresponds to the channel.
        # 2. It uses that producer to post a no-op CONNECT message to that
        #    partition.
        # 3. It creates a Kafka consumer (reader) for that partition.
        # If any of these steps fail, they will be re-attempted every
        # <ShortInterval> for a total of <ShortTotal>, and then every
        # <LongInterval> for a total of <LongTotal> until they succeed.
        # Note that the orderer will be unable to write to or read from a
        # channel until all of the steps above have been completed successfully.
        ShortInterval: 5s
        ShortTotal: 10m
        LongInterval: 5m
        LongTotal: 12h

        # Affects the socket timeouts when waiting for an initial connection, a
        # response, or a transmission. See Config.Net for more info:
        # https://godoc.org/github.com/Shopify/sarama#Config
        NetworkTimeouts:
            DialTimeout: 10s
            ReadTimeout: 10s
            WriteTimeout: 10s

        # Affects the metadata requests when the Kafka cluster is in the middle
        # of a leader election. See Config.Metadata for more info:
        # https://godoc.org/github.com/Shopify/sarama#Config
        Metadata:
            RetryBackoff: 250ms
            RetryMax: 3

        # What to do if posting a message to the Kafka cluster fails. See
        # Config.Producer for more info:
        # https://godoc.org/github.com/Shopify/sarama#Config
        Producer:
            RetryBackoff: 100ms
            RetryMax: 3

        # What to do if reading from the Kafka cluster fails. See
        # Config.Consumer for more info:
        # https://godoc.org/github.com/Shopify/sarama#Config
        Consumer:
            RetryBackoff: 2s

    # Verbose: Enable logging for interactions with the Kafka cluster.
    Verbose: false

    # TLS: TLS settings for the orderer's connection to the Kafka cluster.
    TLS:

        # Enabled: Use TLS when connecting to the Kafka cluster.
        Enabled: false

        # PrivateKey: PEM-encoded private key the orderer will use for
        # authentication.
        PrivateKey:
            # As an alternative to specifying the PrivateKey here, uncomment
            # the following "File" key and specify the file name from which to
            # load the value of PrivateKey.
            #File: path/to/PrivateKey

        # Certificate: PEM-encoded signed public key certificate the orderer
        # will use for authentication.
        Certificate:
            # As an alternative to specifying the Certificate here, uncomment
            # the following "File" key and specify the file name from which to
            # load the value of Certificate.
            #File: path/to/Certificate

        # RootCAs: PEM-encoded trusted root certificates used to validate
        # certificates from the Kafka cluster.
        RootCAs:
            # As an alternative to specifying the RootCAs here, uncomment the
            # following "File" key and specify the file name from which to load
            # the value of RootCAs.
            #File: path/to/RootCAs

    # Kafka version of the Kafka cluster brokers (defaults to 0.9.0.1).
    Version:
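
# Illustrative sketch only (commented out; not used by this example): when TLS
# towards the Kafka brokers is enabled, the PEM material is given inline as the
# value of each key, which in YAML is most naturally written with block
# scalars, roughly as below. The key and certificate contents are placeholders;
# RootCAs would be populated the same way with the Kafka cluster's CA
# certificates, and the Version value simply restates the documented default.
#
# Kafka:
#     TLS:
#         Enabled: true
#         PrivateKey: |
#             -----BEGIN PRIVATE KEY-----
#             ... PEM-encoded key material ...
#             -----END PRIVATE KEY-----
#         Certificate: |
#             -----BEGIN CERTIFICATE-----
#             ... PEM-encoded certificate ...
#             -----END CERTIFICATE-----
#     Version: 0.9.0.1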