github.com/darrenli6/fabric-sdk-example@v0.0.0-20220109053535-94b13b56df8c/bddtests/dc-orderer-kafka-base.yml

# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#

version: '2'

services:

  zookeeper:
    image: hyperledger/fabric-zookeeper
    restart: always
    ports:
      - '2181'
      - '2888'
      - '3888'

  kafka:
    image: hyperledger/fabric-kafka
    restart: always
    environment:
      # ========================================================================
      #     Reference: https://kafka.apache.org/documentation/#configuration
      # ========================================================================
      #
      # socket.request.max.bytes
      # The maximum number of bytes in a socket request. ATTN: If you set this
      # env var, make sure to update `brokerConfig.Producer.MaxMessageBytes` in
      # `newBrokerConfig()` in `fabric/orderer/kafka/config.go` accordingly.
      #- KAFKA_SOCKET_REQUEST_MAX_BYTES=104857600 # 100 * 1024 * 1024 B
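      #
      # If you do enable the env var above, a hedged sketch of the matching
      # Go-side change (assuming the sarama client configuration Fabric uses;
      # verify field names against fabric/orderer/kafka/config.go) would be
      # along the lines of:
      #
      #     brokerConfig := sarama.NewConfig()
      #     // Keep in sync with KAFKA_SOCKET_REQUEST_MAX_BYTES above.
      #     brokerConfig.Producer.MaxMessageBytes = 100 * 1024 * 1024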
      #
      # message.max.bytes
      # The maximum size of an envelope that the broker can receive.
      - KAFKA_MESSAGE_MAX_BYTES=103809024 # 99 * 1024 * 1024 B
      #
      # replica.fetch.max.bytes
      # The number of bytes of messages to attempt to fetch for each channel.
      # This is not an absolute maximum; if the fetched envelope is larger than
      # this value, the envelope will still be returned to ensure that progress
      # can be made. The maximum message size accepted by the broker is defined
      # via message.max.bytes above.
      - KAFKA_REPLICA_FETCH_MAX_BYTES=103809024 # 99 * 1024 * 1024 B
      #
      # unclean.leader.election.enable
      # Data consistency is key in a blockchain environment. We cannot have a
      # leader chosen outside of the in-sync replica set, or we run the risk of
      # overwriting the offsets that the previous leader produced and, as a
      # result, rewriting the blockchain that the orderers produce.
      - KAFKA_UNCLEAN_LEADER_ELECTION_ENABLE=false
      #
      # log.retention.ms
      # Until the ordering service in Fabric adds support for pruning of the
      # Kafka logs, time-based retention should be disabled to prevent log
      # segments from expiring. (Size-based retention, controlled by
      # log.retention.bytes, is disabled by default, so there is no need to set
      # it explicitly.)
      # - KAFKA_LOG_RETENTION_MS=-1
    ports:
      - '9092'
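
# ---------------------------------------------------------------------------
# Illustrative usage (an assumption, not part of the upstream file): as a
# "-base" compose file, these services are meant to be pulled into a concrete
# topology via the compose v2 `extends` mechanism rather than started
# directly. The consuming file name and the extra environment variables below
# are hypothetical examples; check the accompanying bddtests compose files for
# the real ones.
#
#   # dc-orderer-kafka.yml (hypothetical consumer of this base file)
#   version: '2'
#   services:
#     zookeeper0:
#       extends:
#         file: dc-orderer-kafka-base.yml
#         service: zookeeper
#     kafka0:
#       extends:
#         file: dc-orderer-kafka-base.yml
#         service: kafka
#       environment:
#         - KAFKA_BROKER_ID=0
#         - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181
#       depends_on:
#         - zookeeper0
# ---------------------------------------------------------------------------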