go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/logdog/server/bundleServicesClient/client.go

// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bundleServicesClient

import (
	"context"
	"sync"
	"time"

	"go.chromium.org/luci/common/errors"
	"go.chromium.org/luci/common/gcloud/gae"
	"go.chromium.org/luci/common/logging"
	s "go.chromium.org/luci/logdog/api/endpoints/coordinator/services/v1"

	"github.com/golang/protobuf/proto"
	"google.golang.org/api/support/bundler"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

// The maximum AppEngine request size, minus 1MB for overhead.
const maxBundleSize = gae.MaxRequestSize - (1024 * 1024) // 1MB

// Client is a LogDog Coordinator Services endpoint client that intercepts
// calls that can be batched and buffers them, sending them with the Batch
// RPC instead of as individual RPCs.
//
// The Context and CallOption set for the first intercepted call will be used
// when making the batch call; all other CallOption sets will be ignored.
//
// Bundling parameters can be controlled by setting DelayThreshold and
// BundleCountThreshold before the first intercepted call; they are copied
// into the underlying Bundler when it is first initialized.
type Client struct {
	// ServicesClient is the Coordinator Services endpoint Client that is being
	// wrapped.
	s.ServicesClient

	// Once this delay has passed since the first message was added to a
	// bundle, handle the bundle.
	DelayThreshold time.Duration

	// Once a bundle has this many items, handle the bundle. Since only one
	// item at a time is added to a bundle, no bundle will exceed this
	// threshold, so it also serves as a limit.
	BundleCountThreshold int

	initBundlerOnce sync.Once
	bundler         *bundler.Bundler

	// outstanding is used to track outstanding RPCs. On Flush, the Client will
	// block pending completion of all outstanding RPCs.
	outstanding sync.WaitGroup
}

// RegisterStream implements ServicesClient.
func (c *Client) RegisterStream(ctx context.Context, in *s.RegisterStreamRequest, opts ...grpc.CallOption) (
	*s.RegisterStreamResponse, error) {

	resp, err := c.bundleRPC(ctx, opts, &s.BatchRequest_Entry{
		Value: &s.BatchRequest_Entry_RegisterStream{RegisterStream: in},
	})
	if err != nil {
		return nil, err
	}

	return resp.GetRegisterStream(), nil
}

// LoadStream implements ServicesClient.
func (c *Client) LoadStream(ctx context.Context, in *s.LoadStreamRequest, opts ...grpc.CallOption) (
	*s.LoadStreamResponse, error) {

	resp, err := c.bundleRPC(ctx, opts, &s.BatchRequest_Entry{
		Value: &s.BatchRequest_Entry_LoadStream{LoadStream: in},
	})
	if err != nil {
		return nil, err
	}

	return resp.GetLoadStream(), nil
}
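// -----------------------------------------------------------------------------
// Editor's note: the function below is not part of the original file. It is a
// minimal, hedged usage sketch of Client, assuming a concrete s.ServicesClient
// ("svc") is available from elsewhere; the function name exampleUsage and the
// threshold values are hypothetical and chosen only for illustration.
// -----------------------------------------------------------------------------

// exampleUsage wraps an existing Coordinator Services client so that eligible
// RPCs are coalesced into Batch calls, then flushes buffered work on exit.
func exampleUsage(ctx context.Context, svc s.ServicesClient) error {
	c := &Client{
		ServicesClient:       svc,
		DelayThreshold:       time.Second, // dispatch a bundle at most 1s after its first entry
		BundleCountThreshold: 20,          // ...or as soon as it holds 20 entries
	}
	// Flush dispatches any partially filled bundle and waits for outstanding
	// Batch RPCs to complete.
	defer c.Flush()

	// Each intercepted call blocks until its entry's response arrives in the
	// corresponding BatchResponse.
	if _, err := c.RegisterStream(ctx, &s.RegisterStreamRequest{}); err != nil {
		return err
	}
	_, err := c.LoadStream(ctx, &s.LoadStreamRequest{})
	return err
}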
// TerminateStream implements ServicesClient.
func (c *Client) TerminateStream(ctx context.Context, in *s.TerminateStreamRequest, opts ...grpc.CallOption) (
	*emptypb.Empty, error) {

	_, err := c.bundleRPC(ctx, opts, &s.BatchRequest_Entry{
		Value: &s.BatchRequest_Entry_TerminateStream{TerminateStream: in},
	})
	if err != nil {
		return nil, err
	}

	return &emptypb.Empty{}, nil
}

// ArchiveStream implements ServicesClient.
func (c *Client) ArchiveStream(ctx context.Context, in *s.ArchiveStreamRequest, opts ...grpc.CallOption) (
	*emptypb.Empty, error) {

	_, err := c.bundleRPC(ctx, opts, &s.BatchRequest_Entry{
		Value: &s.BatchRequest_Entry_ArchiveStream{ArchiveStream: in},
	})
	if err != nil {
		return nil, err
	}

	return &emptypb.Empty{}, nil
}

// Flush flushes the Bundler. It should be called when terminating to ensure
// that buffered client requests have been completed.
func (c *Client) Flush() {
	c.initBundler()
	c.bundler.Flush()
	c.outstanding.Wait()
}

func (c *Client) initBundler() {
	c.initBundlerOnce.Do(func() {
		c.bundler = bundler.NewBundler(&batchEntry{}, c.bundlerHandler)

		c.bundler.DelayThreshold = c.DelayThreshold
		c.bundler.BundleCountThreshold = c.BundleCountThreshold
		c.bundler.BundleByteThreshold = maxBundleSize // Hard-coded.
	})
}

// bundleRPC adds req to the underlying Bundler, blocks until it completes, and
// returns its response.
func (c *Client) bundleRPC(ctx context.Context, opts []grpc.CallOption, req *s.BatchRequest_Entry) (*s.BatchResponse_Entry, error) {
	c.initBundler()

	be := &batchEntry{
		req:      req,
		ctx:      ctx,
		opts:     opts,
		complete: make(chan *s.BatchResponse_Entry, 1),
	}
	if err := c.addEntry(be); err != nil {
		return nil, err
	}

	resp := <-be.complete
	if e := resp.GetErr(); e != nil {
		return nil, e.ToError()
	}
	return resp, nil
}

func (c *Client) addEntry(be *batchEntry) error {
	return c.bundler.Add(be, proto.Size(be.req))
}

// bundlerHandler is called when a bundle threshold has been met.
//
// This is a bundler.Bundler handler function. "iface" is a []*batchEntry, a
// slice of the prototype type passed into NewBundler.
//
// Note that "iface" is owned by this handler; the Bundler allocates a new
// slice after each bundle dispatch. Therefore, retention and mutation are safe.
func (c *Client) bundlerHandler(iface any) {
	entries := iface.([]*batchEntry)
	if len(entries) == 0 {
		return
	}

	ctx, opts := entries[0].ctx, entries[0].opts

	c.outstanding.Add(1)
	go func() {
		defer c.outstanding.Done()
		c.sendBundle(ctx, entries, opts...)
	}()
}

func (c *Client) sendBundle(ctx context.Context, entries []*batchEntry, opts ...grpc.CallOption) {
	req := s.BatchRequest{
		Req: make([]*s.BatchRequest_Entry, len(entries)),
	}
	for i, ent := range entries {
		req.Req[i] = ent.req
	}

	resp, err := c.ServicesClient.Batch(ctx, &req, opts...)

	// Supply a response to each blocking request. Note that "complete" is a
	// buffered channel, so this will not block.
	if err != nil {
		logging.WithError(err).Errorf(ctx, "Failed to send RPC bundle.")

		// Error case: generate an error response from "err".
		for _, ent := range entries {
			e := s.MakeError(err)
			ent.complete <- &s.BatchResponse_Entry{
				Value: &s.BatchResponse_Entry_Err{Err: e},
			}
		}
		return
	}

	// We have no way to handle the case where the Coordinator provides no
	// responses at all: we would keep resubmitting our initial request set
	// indefinitely.
	//
	// This shouldn't happen, but if it does, make it visible.
	if len(resp.Resp) == 0 {
		panic(errors.New("batch response had zero entries"))
	}

	// Pair each response with its request.
	count := 0
	for _, r := range resp.Resp {
		// Handle error conditions.
		switch {
		case r.Index < 0, int(r.Index) >= len(entries):
			logging.Warningf(ctx, "Response included invalid index %d (%d entries).", r.Index, len(entries))
			continue

		case entries[r.Index] == nil:
			logging.Warningf(ctx, "Response included duplicate entry for index %d.", r.Index)
			continue
		}

		entries[r.Index].complete <- r
		entries[r.Index] = nil
		count++
	}

	// Fast path: if our count equals the number of entries, then we've processed
	// them all.
	if count == len(entries) {
		return
	}

	// Figure out which entries we didn't process and resubmit them.
	count = 0
	for _, be := range entries {
		if be == nil {
			// Already processed.
			continue
		}

		if err := c.addEntry(be); err != nil {
			// This entry was added successfully before, so re-adding it can't
			// fail here.
			panic(errors.Annotate(err, "failed to re-add entry").Err())
		}
		count++
	}
	logging.Debugf(ctx, "Resubmitting %d unprocessed entr[y|ies].", count)
}

type batchEntry struct {
	req *s.BatchRequest_Entry

	ctx  context.Context
	opts []grpc.CallOption

	complete chan *s.BatchResponse_Entry
}
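// -----------------------------------------------------------------------------
// Editor's note: the function below is not part of the original file. It is a
// hedged sketch of the intended concurrency model, assuming a concrete
// s.ServicesClient ("svc"); the function name exampleConcurrentTermination and
// the stream count are hypothetical. Many goroutines issue TerminateStream
// concurrently; the Client coalesces their entries into a small number of
// Batch RPCs, and Flush blocks until every outstanding bundle has been sent
// and answered.
// -----------------------------------------------------------------------------
func exampleConcurrentTermination(ctx context.Context, svc s.ServicesClient) {
	c := &Client{
		ServicesClient:       svc,
		DelayThreshold:       500 * time.Millisecond,
		BundleCountThreshold: 50,
	}

	var wg sync.WaitGroup
	for i := 0; i < 200; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Each call blocks until its entry's response (or a bundle-level
			// error) is delivered via the batchEntry's "complete" channel.
			if _, err := c.TerminateStream(ctx, &s.TerminateStreamRequest{}); err != nil {
				logging.WithError(err).Errorf(ctx, "TerminateStream failed.")
			}
		}()
	}
	wg.Wait()

	// Dispatch any partially filled bundle and wait for in-flight Batch RPCs.
	c.Flush()
}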