go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/server/bqlog/bundler_test.go

// Copyright 2021 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bqlog

import (
	"context"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"

	"go.chromium.org/luci/common/tsmon"
	"go.chromium.org/luci/common/tsmon/store"
	"go.chromium.org/luci/common/tsmon/target"
	"go.chromium.org/luci/common/tsmon/types"

	. "github.com/smartystreets/goconvey/convey"
)

// Tighten the bundler limits so the tests below can trigger batching and
// drops with tiny payloads.
func init() {
	batchSizeMaxBytes = 50
	defaultBatchAgeMax = time.Hour // ~infinity, cut by size
	defaultMaxLiveSizeBytes = 200
}

func TestBundler(t *testing.T) {
	t.Parallel()

	Convey("With bundler", t, func() {
		ctx := context.Background()
		ctx, _, _ = tsmon.WithFakes(ctx)
		tsmon.GetState(ctx).SetStore(store.NewInMemory(&target.Task{}))

		// counter reads the current int64 value of a tsmon metric from the
		// fake in-memory store.
		counter := func(m types.Metric, fieldVals ...any) int64 {
			val, _ := tsmon.GetState(ctx).Store().Get(ctx, m, time.Time{}, fieldVals).(int64)
			return val
		}

		var m sync.Mutex
		wrote := map[string]int{}

		// writer records how many rows were appended to each write stream.
		writer := &FakeBigQueryWriter{
			Send: func(r *storagepb.AppendRowsRequest) error {
				m.Lock()
				defer m.Unlock()
				wrote[r.WriteStream] += len(r.GetProtoRows().Rows.SerializedRows)
				return nil
			},
		}

		b := Bundler{
			CloudProject: "project",
			Dataset:      "dataset",
		}
		b.RegisterSink(Sink{
			Prototype: &durationpb.Duration{},
			Table:     "durations",
		})
		b.RegisterSink(Sink{
			Prototype: &timestamppb.Timestamp{},
			Table:     "timestamps",
		})

		Convey("Start+drain empty", func() {
			b.Start(ctx, writer)
			b.Shutdown(ctx)
			So(wrote, ShouldHaveLength, 0)
		})

		Convey("Start+send+drain OK", func() {
			b.Start(ctx, writer)
			b.Log(ctx, &durationpb.Duration{Seconds: 1})
			b.Log(ctx, &durationpb.Duration{Seconds: 2})
			b.Log(ctx, &timestamppb.Timestamp{Seconds: 1})
			b.Log(ctx, &timestamppb.Timestamp{Seconds: 2})
			b.Log(ctx, &timestamppb.Timestamp{Seconds: 3})
			b.Shutdown(ctx)

			So(wrote, ShouldResemble, map[string]int{
				"projects/project/datasets/dataset/tables/durations/_default":  2,
				"projects/project/datasets/dataset/tables/timestamps/_default": 3,
			})

			So(counter(metricSentCounter, "project.dataset.durations"), ShouldEqual, 2)
			So(counter(metricSentCounter, "project.dataset.timestamps"), ShouldEqual, 3)
		})

		Convey("Drops rows on fatal errors", func() {
			writer.Recv = func() (*storagepb.AppendRowsResponse, error) {
				return nil, status.Errorf(codes.InvalidArgument, "Bad")
			}

			b.Start(ctx, writer)
			b.Log(ctx, &durationpb.Duration{Seconds: 1})
			b.Log(ctx, &timestamppb.Timestamp{Seconds: 1})
			b.Shutdown(ctx)

			So(counter(metricSentCounter, "project.dataset.durations"), ShouldEqual, 0)
"project.dataset.durations"), ShouldEqual, 0) 117 So(counter(metricSentCounter, "project.dataset.timestamps"), ShouldEqual, 0) 118 So(counter(metricDroppedCounter, "project.dataset.durations", "DISPATCHER"), ShouldEqual, 1) 119 So(counter(metricDroppedCounter, "project.dataset.timestamps", "DISPATCHER"), ShouldEqual, 1) 120 So(counter(metricErrorsCounter, "project.dataset.durations", "INVALID_ARGUMENT"), ShouldEqual, 1) 121 So(counter(metricErrorsCounter, "project.dataset.timestamps", "INVALID_ARGUMENT"), ShouldEqual, 1) 122 }) 123 124 Convey("Batching and dropping excesses", func() { 125 countdown := int64(2) 126 batchLen := make(chan int) 127 128 writer.Send = func(r *storagepb.AppendRowsRequest) error { 129 if atomic.AddInt64(&countdown, -1) >= 0 { 130 batchLen <- len(r.GetProtoRows().Rows.SerializedRows) 131 return nil 132 } 133 return status.Errorf(codes.Internal, "Closed") 134 } 135 136 b.Start(ctx, writer) 137 for i := 0; i < 1000; i++ { 138 b.Log(ctx, &durationpb.Duration{Seconds: int64(i)}) 139 } 140 141 // Make sure we get small batches. 142 So(<-batchLen, ShouldEqual, 25) 143 So(<-batchLen, ShouldEqual, 16) 144 145 // Quickly drop the rest by shutting down without waiting. 146 ctx, cancel := context.WithCancel(ctx) 147 cancel() 148 b.Shutdown(ctx) 149 150 So(counter(metricSentCounter, "project.dataset.durations"), ShouldEqual, 25+16) 151 So(counter(metricDroppedCounter, "project.dataset.durations", "DISPATCHER"), ShouldBeGreaterThan, 0) 152 }) 153 }) 154 }