go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/logdog/appengine/coordinator/endpoints/services/batch.go

// Copyright 2015 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package services

import (
	"context"
	"sync"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	logdog "go.chromium.org/luci/logdog/api/endpoints/coordinator/services/v1"

	"go.chromium.org/luci/common/gcloud/gae"
	"go.chromium.org/luci/common/logging"
	"go.chromium.org/luci/common/sync/parallel"
)

// The maximum AppEngine response size, minus 1MB for overhead.
const maxResponseSize = gae.MaxResponseSize - (1024 * 1024) // 1MB

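// Batch executes a batch of subrequests in parallel, recording one
// BatchResponse_Entry per subrequest that fits within the response size
// limit. Failures of individual subrequests are reported through each
// entry's Err field rather than as a top-level error.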
func (s *server) Batch(c context.Context, req *logdog.BatchRequest) (*logdog.BatchResponse, error) {
	// Pre-allocate our response slice's capacity so entries can be appended to
	// it in parallel (under respMu) without reallocation.
	resp := logdog.BatchResponse{
		Resp: make([]*logdog.BatchResponse_Entry, 0, len(req.Req)),
	}
	if len(req.Req) == 0 {
		return &resp, nil
	}

	// Perform our batch operations in parallel. Each operation's error is
	// encoded in its entry's Err field, so this will never return an error.
	//
	// We will continue to populate the response until appending an entry would
	// exceed our size constraint; entries that do not fit are dropped.
	var respMu sync.Mutex
	var respSize int64
	_ = parallel.WorkPool(8, func(workC chan<- func() error) {
		for i, e := range req.Req {
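			// Capture the loop variables for the closure below; prior to
			// Go 1.22, range variables are reused across iterations.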
			i, e := i, e
			workC <- func() error {
				c := logging.SetField(c, "batchIndex", i)

				r := logdog.BatchResponse_Entry{
					Index: int32(i),
				}

				s.processBatchEntry(c, e, &r)
				if err := r.GetErr(); err != nil {
					logging.Fields{
						"code":      err.GrpcCode,
						"transient": err.Transient,
					}.Errorf(c, "Failed batch entry.")
				}

				// See if this fits into our response.
				size := int64(proto.Size(&r))

				respMu.Lock()
				defer respMu.Unlock()
				if respSize+size > maxResponseSize {
					logging.Warningf(c, "Response would exceed maximum response size (%d > %d); discarding entry.", respSize+size, maxResponseSize)
					return nil
				}
				resp.Resp = append(resp.Resp, &r)
				respSize += size
				return nil
			}
		}
	})
	return &resp, nil
}

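// processBatchEntry dispatches a single batch subrequest to its corresponding
// service method and records the result (or error) in r.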
func (s *server) processBatchEntry(c context.Context, e *logdog.BatchRequest_Entry, r *logdog.BatchResponse_Entry) {
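	// enterSingleContext enters the subrequest's project namespace (if it
	// declares one) before invoking fn, so each subrequest runs with its own
	// project-scoped context.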
	enterSingleContext := func(c context.Context, req proto.Message, fn func(context.Context) error) error {
		c, err := maybeEnterProjectNamespace(c, req)
		if err != nil {
			return err
		}
		return fn(c)
	}

	var err error
	switch req := e.Value.(type) {
	case *logdog.BatchRequest_Entry_RegisterStream:
		var resp *logdog.RegisterStreamResponse
		err = enterSingleContext(c, req.RegisterStream, func(c context.Context) (err error) {
			if resp, err = s.RegisterStream(c, req.RegisterStream); err == nil {
				r.Value = &logdog.BatchResponse_Entry_RegisterStream{RegisterStream: resp}
			}
			return
		})

	case *logdog.BatchRequest_Entry_LoadStream:
		var resp *logdog.LoadStreamResponse
		err = enterSingleContext(c, req.LoadStream, func(c context.Context) (err error) {
			if resp, err = s.LoadStream(c, req.LoadStream); err == nil {
				r.Value = &logdog.BatchResponse_Entry_LoadStream{LoadStream: resp}
			}
			return
		})

	case *logdog.BatchRequest_Entry_TerminateStream:
		err = enterSingleContext(c, req.TerminateStream, func(c context.Context) (err error) {
			_, err = s.TerminateStream(c, req.TerminateStream)
			return
		})

	case *logdog.BatchRequest_Entry_ArchiveStream:
		err = enterSingleContext(c, req.ArchiveStream, func(c context.Context) (err error) {
			_, err = s.ArchiveStream(c, req.ArchiveStream)
			return
		})

	default:
		err = status.Error(codes.InvalidArgument, "unrecognized subrequest")
	}

	// If we encountered an error, populate the entry's value with an Error.
	if err != nil {
		r.Value = &logdog.BatchResponse_Entry_Err{Err: logdog.MakeError(err)}
	}
}
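
// The sketch below is illustrative only (an assumption, not part of the
// production flow): it shows how a hypothetical caller might pair each
// BatchResponse_Entry back to its originating subrequest via Index, and how
// per-entry errors surface through GetErr(). It relies only on identifiers
// already used in this file and leaves construction of the subrequest
// entries to the caller.
func exampleBatchPairing(c context.Context, s *server, entries []*logdog.BatchRequest_Entry) {
	resp, err := s.Batch(c, &logdog.BatchRequest{Req: entries})
	if err != nil {
		// Batch encodes per-entry failures in each entry, so a top-level error
		// is not expected here.
		return
	}

	for _, e := range resp.Resp {
		// Index identifies the subrequest this entry answers. Entries may be
		// absent entirely if they were dropped for exceeding maxResponseSize.
		_ = entries[e.Index]

		if batchErr := e.GetErr(); batchErr != nil {
			logging.Fields{
				"code":      batchErr.GrpcCode,
				"transient": batchErr.Transient,
			}.Errorf(c, "Batch subrequest failed.")
			continue
		}
		// Otherwise, e.Value holds the typed response for this subrequest.
	}
}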