github.com/Jeffail/benthos/v3@v3.65.0/public/service/example_buffer_plugin_test.go

package service_test

import (
	"context"
	"sync"

	"github.com/Jeffail/benthos/v3/public/service"

	// Import all standard Benthos components
	_ "github.com/Jeffail/benthos/v3/public/components/all"
)

type memoryBuffer struct {
	messages       chan service.MessageBatch
	endOfInputChan chan struct{}
	closeOnce      sync.Once
}

func newMemoryBuffer(n int) *memoryBuffer {
	return &memoryBuffer{
		messages:       make(chan service.MessageBatch, n),
		endOfInputChan: make(chan struct{}),
	}
}

func (m *memoryBuffer) WriteBatch(ctx context.Context, batch service.MessageBatch, aFn service.AckFunc) error {
	select {
	case m.messages <- batch:
	case <-ctx.Done():
		return ctx.Err()
	}
	// We weaken delivery guarantees here by acknowledging receipt of our batch
	// immediately.
	return aFn(ctx, nil)
}

func yoloIgnoreNacks(context.Context, error) error {
	// YOLO: Drop messages that are nacked
	return nil
}

func (m *memoryBuffer) ReadBatch(ctx context.Context) (service.MessageBatch, service.AckFunc, error) {
	select {
	case msg := <-m.messages:
		return msg, yoloIgnoreNacks, nil
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	case <-m.endOfInputChan:
		// Input has ended, so return ErrEndOfBuffer if our buffer is empty.
		select {
		case msg := <-m.messages:
			return msg, yoloIgnoreNacks, nil
		default:
			return nil, nil, service.ErrEndOfBuffer
		}
	}
}

func (m *memoryBuffer) EndOfInput() {
	m.closeOnce.Do(func() {
		close(m.endOfInputChan)
	})
}

func (m *memoryBuffer) Close(ctx context.Context) error {
	// Nothing to clean up
	return nil
}

// This example demonstrates how to create a buffer plugin. Buffers are an
// advanced component type that most plugin authors aren't likely to require.
func Example_bufferPlugin() {
	configSpec := service.NewConfigSpec().
		Summary("Creates a lame memory buffer that loses data on forced restarts or service crashes.").
		Field(service.NewIntField("max_batches").Default(100))

	err := service.RegisterBatchBuffer("lame_memory", configSpec,
		func(conf *service.ParsedConfig, mgr *service.Resources) (service.BatchBuffer, error) {
			capacity, err := conf.FieldInt("max_batches")
			if err != nil {
				return nil, err
			}
			return newMemoryBuffer(capacity), nil
		})
	if err != nil {
		panic(err)
	}

	// And then execute Benthos with:
	// service.RunCLI(context.Background())
}
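
// The function below is a minimal sketch, not part of the original example,
// showing how the registered "lame_memory" buffer could be exercised
// in-process via the stream builder instead of service.RunCLI. It assumes the
// registration in Example_bufferPlugin has already run; the YAML config,
// the generate/stdout components, and the function name are illustrative
// choices for this sketch.
func runLameMemoryExample() error {
	builder := service.NewStreamBuilder()

	// Hypothetical config: a generate input feeds three messages through the
	// lame_memory buffer and out to stdout.
	if err := builder.SetYAML(`
input:
  generate:
    count: 3
    interval: ""
    mapping: 'root.message = "hello world"'
buffer:
  lame_memory:
    max_batches: 10
output:
  stdout: {}
`); err != nil {
		return err
	}

	stream, err := builder.Build()
	if err != nil {
		return err
	}

	// Run blocks until the generate input finishes and the buffer drains.
	return stream.Run(context.Background())
}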