github.com/erda-project/erda-infra@v1.0.9/providers/kafka/examples/batch-reader/main.go (about)

     1  // Copyright (c) 2021 Terminus, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package main
    16  
    17  import (
    18  	"context"
    19  	"encoding/json"
    20  	"fmt"
    21  	"os"
    22  	"time"
    23  
    24  	gokafka "github.com/confluentinc/confluent-kafka-go/kafka"
    25  	"github.com/erda-project/erda-infra/base/logs"
    26  	"github.com/erda-project/erda-infra/base/servicehub"
    27  	"github.com/erda-project/erda-infra/providers/kafka"
    28  )
    29  
// config holds the provider configuration loaded by servicehub.
type config struct {
	// Input configures the kafka batch reader (topics, group, offsets, ...);
	// populated from the "input" section of the config file.
	Input kafka.BatchReaderConfig `file:"input"`
}
    33  
// provider is the example servicehub provider; its exported fields are
// injected by the hub before Run is called.
type provider struct {
	Cfg   *config         // parsed config (see config above)
	Log   logs.Logger     // injected logger
	Kafka kafka.Interface `autowired:"kafka-consumer"` // injected kafka consumer service
}
    39  
    40  func (p *provider) Run(ctx context.Context) error {
    41  	reader, err := p.Kafka.NewBatchReader(&p.Cfg.Input,
    42  		kafka.WithReaderDecoder(func(key, value []byte, topic *string, timestamp time.Time) (interface{}, error) {
    43  			m := make(map[string]interface{})
    44  			err := json.Unmarshal(value, &m)
    45  			return m, err
    46  		}),
    47  	)
    48  	if err != nil {
    49  		return err
    50  	}
    51  	defer reader.Close()
    52  
    53  	bufferSize := 100
    54  	limit := 200
    55  	buf := make([]interface{}, bufferSize)
    56  
    57  	for {
    58  		// check
    59  		if limit <= 0 {
    60  			return nil
    61  		}
    62  		select {
    63  		case <-ctx.Done():
    64  			return nil
    65  		default:
    66  		}
    67  
    68  		// read data
    69  		n, err := reader.ReadN(buf, time.Second)
    70  		if err != nil {
    71  			return err
    72  		}
    73  		if n <= 0 {
    74  			continue
    75  		}
    76  
    77  		// process data
    78  		for i := 0; i < n; i++ {
    79  			fmt.Println(buf[i])
    80  			limit--
    81  			if limit <= 0 {
    82  				break
    83  			}
    84  		}
    85  
    86  		// print offsets before confirm
    87  		fmt.Println("offsets before confirm")
    88  		ps, _ := kafka.CommittedOffsets(reader)
    89  		printPartitions(ps)
    90  
    91  		// commit offsets
    92  		err = reader.Confirm()
    93  		if err != nil {
    94  			return nil
    95  		}
    96  
    97  		// print offsets after confirm
    98  		fmt.Println("offsets after confirm")
    99  		ps, _ = kafka.CommittedOffsets(reader)
   100  		printPartitions(ps)
   101  	}
   102  }
   103  
   104  func printPartitions(partitions []gokafka.TopicPartition) {
   105  	for _, p := range partitions {
   106  		byts, _ := json.Marshal(p)
   107  		fmt.Println("partition:", string(byts))
   108  	}
   109  }
   110  
   111  func init() {
   112  	servicehub.Register("examples", &servicehub.Spec{
   113  		Services:   []string{"hello"},
   114  		ConfigFunc: func() interface{} { return &config{} },
   115  		Creator: func() servicehub.Provider {
   116  			return &provider{}
   117  		},
   118  	})
   119  }
   120  
   121  func main() {
   122  	hub := servicehub.New()
   123  	hub.Run("examples", "", os.Args...)
   124  }