// Copyright © 2021 Kaleido, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package batch

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/kaleido-io/firefly/internal/log"
	"github.com/kaleido-io/firefly/internal/retry"
	"github.com/kaleido-io/firefly/mocks/databasemocks"
	"github.com/kaleido-io/firefly/pkg/fftypes"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// newTestBatchProcessor builds a batchProcessor backed by a mock database
// plugin, with small batch limits and very short timeouts/retry delays so
// the unit tests run quickly. It returns both the mock (for setting
// expectations) and the processor under test.
func newTestBatchProcessor(dispatch DispatchHandler) (*databasemocks.Plugin, *batchProcessor) {
	mdi := &databasemocks.Plugin{}
	bp := newBatchProcessor(context.Background(), mdi, &batchProcessorConf{
		namespace:          "ns1",
		author:             "0x12345",
		dispatch:           dispatch,
		processorQuiescing: func() {},
		Options: Options{
			BatchMaxSize:   10,
			BatchTimeout:   10 * time.Millisecond,
			DisposeTimeout: 20 * time.Millisecond,
		},
	}, &retry.Retry{
		InitialDelay: 1 * time.Microsecond,
		MaximumDelay: 1 * time.Microsecond,
	})
	return mdi, bp
}

// mockRunAsGroupPassthrough wires the mocked RunAsGroup so it simply invokes
// the supplied callback with the caller's context, and returns the callback's
// error — i.e. no transaction semantics, just a passthrough.
func mockRunAsGroupPassthrough(mdi *databasemocks.Plugin) {
	rag := mdi.On("RunAsGroup", mock.Anything, mock.Anything)
	rag.RunFn = func(a mock.Arguments) {
		fn := a[1].(func(context.Context) error)
		rag.ReturnArguments = mock.Arguments{fn(a[0].(context.Context))}
	}
}

// TestUnfilledBatch feeds 5 messages (below BatchMaxSize of 10) into the
// processor and relies on the batch timeout to cause a single dispatch
// containing all 5 messages.
func TestUnfilledBatch(t *testing.T) {
	log.SetLevel("debug")

	wg := sync.WaitGroup{}
	wg.Add(2)

	dispatched := []*fftypes.Batch{}
	mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error {
		dispatched = append(dispatched, b)
		wg.Done()
		return nil
	})
	mockRunAsGroupPassthrough(mdi)
	mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil)
	mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
	mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil)

	// Generate the work
	work := make([]*batchWork, 5)
	for i := 0; i < 5; i++ {
		msgid := fftypes.NewUUID()
		work[i] = &batchWork{
			msg:        &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}},
			dispatched: make(chan *batchDispatch),
		}
	}

	// Kick off a go routine to consume the confirmations
	go func() {
		for i := 0; i < 5; i++ {
			<-work[i].dispatched
		}
		wg.Done()
	}()

	// Dispatch the work
	for i := 0; i < 5; i++ {
		bp.newWork <- work[i]
	}

	// Wait for the confirmations, and the dispatch
	wg.Wait()

	// Check we got all the messages in a single batch
	assert.Equal(t, len(dispatched[0].Payload.Messages), 5)

	bp.close()
	bp.waitClosed()

}

// TestFilledBatchSlowPersistence fills a batch exactly to BatchMaxSize (10
// items, alternating message/data work) with the batch timeout effectively
// disabled, while the UpsertBatch mock is blocked on a channel — verifying
// the processor dispatches once persistence is unblocked.
func TestFilledBatchSlowPersistence(t *testing.T) {
	log.SetLevel("debug")

	wg := sync.WaitGroup{}
	wg.Add(2)

	dispatched := []*fftypes.Batch{}
	mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error {
		dispatched = append(dispatched, b)
		wg.Done()
		return nil
	})
	bp.conf.BatchTimeout = 1 * time.Hour // Must fill the batch
	mockUpsert := mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything, mock.Anything)
	mockUpsert.ReturnArguments = mock.Arguments{nil}
	unblockPersistence := make(chan time.Time)
	mockUpsert.WaitFor = unblockPersistence
	mockRunAsGroupPassthrough(mdi)
	mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil)
	mdi.On("UpdateBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil)

	// Generate the work — even indexes are messages, odd indexes are data
	work := make([]*batchWork, 10)
	for i := 0; i < 10; i++ {
		msgid := fftypes.NewUUID()
		if i%2 == 0 {
			work[i] = &batchWork{
				msg:        &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}},
				dispatched: make(chan *batchDispatch),
			}
		} else {
			work[i] = &batchWork{
				data:       []*fftypes.Data{{ID: msgid}},
				dispatched: make(chan *batchDispatch),
			}
		}
	}

	// Kick off a go routine to consume the confirmations
	go func() {
		for i := 0; i < 10; i++ {
			<-work[i].dispatched
		}
		wg.Done()
	}()

	// Dispatch the work
	for i := 0; i < 10; i++ {
		bp.newWork <- work[i]
	}

	// Unblock the dispatch
	time.Sleep(10 * time.Millisecond)
	mockUpsert.WaitFor = nil
	unblockPersistence <- time.Now() // First call to write the first entry in the batch

	// Wait for completion
	wg.Wait()

	// Check we got all the messages in a single batch
	assert.Equal(t, len(dispatched[0].Payload.Messages), 5)
	assert.Equal(t, len(dispatched[0].Payload.Data), 5)

	bp.close()
	bp.waitClosed()

}

// TestCloseToUnblockDispatch verifies that closing the processor unblocks a
// dispatchBatch call whose dispatch handler keeps failing.
func TestCloseToUnblockDispatch(t *testing.T) {
	_, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error {
		return fmt.Errorf("pop")
	})
	bp.close()
	bp.dispatchBatch(&fftypes.Batch{}, []*fftypes.Bytes32{})
}

// TestCloseToUnblockUpsertBatch verifies that closing the processor unblocks
// a persistence retry loop where UpsertBatch always fails.
func TestCloseToUnblockUpsertBatch(t *testing.T) {

	wg := sync.WaitGroup{}
	wg.Add(1)

	mdi, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error {
		return nil
	})
	bp.retry.MaximumDelay = 1 * time.Microsecond
	bp.conf.BatchTimeout = 100 * time.Second
	mockRunAsGroupPassthrough(mdi)
	mdi.On("UpdateMessages", mock.Anything, mock.Anything, mock.Anything).Return(nil)
	mup := mdi.On("UpsertBatch", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))
	waitForCall := make(chan bool)
	mup.RunFn = func(a mock.Arguments) {
		waitForCall <- true
		<-waitForCall
	}

	// Generate the work
	msgid := fftypes.NewUUID()
	work := &batchWork{
		msg:        &fftypes.Message{Header: fftypes.MessageHeader{ID: msgid}},
		dispatched: make(chan *batchDispatch),
	}

	// Dispatch the work
	bp.newWork <- work

	// Ensure the mock has been run
	<-waitForCall
	close(waitForCall)

	// Close to unblock
	bp.close()
	bp.waitClosed()

}

// TestCalcPinsFail verifies that maskContexts propagates an error from
// UpsertNonceNext when computing the masked pins for a private group message.
func TestCalcPinsFail(t *testing.T) {
	_, bp := newTestBatchProcessor(func(c context.Context, b *fftypes.Batch, s []*fftypes.Bytes32) error {
		return nil
	})
	defer bp.close()
	mdi := bp.database.(*databasemocks.Plugin)
	mdi.On("UpsertNonceNext", mock.Anything, mock.Anything).Return(fmt.Errorf("pop"))

	gid := fftypes.NewRandB32()
	_, err := bp.maskContexts(bp.ctx, &fftypes.Batch{
		Group: gid,
		Payload: fftypes.BatchPayload{
			Messages: []*fftypes.Message{
				{Header: fftypes.MessageHeader{
					Group:  gid,
					Topics: fftypes.FFNameArray{"topic1"},
				}},
			},
		},
	})
	assert.Regexp(t, "pop", err)
}