github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go (about)

     1  /*
     2   *
     3   * Copyright 2020 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  // Package loadstore contains the loadStoreWrapper shared by the balancers.
    20  package loadstore
    21  
    22  import (
    23  	"sync"
    24  
    25  	"github.com/hxx258456/ccgo/grpc/xds/internal/xdsclient/load"
    26  )
    27  
    28  // NewWrapper creates a Wrapper.
    29  func NewWrapper() *Wrapper {
    30  	return &Wrapper{}
    31  }
    32  
// Wrapper wraps a load store with cluster and edsService.
//
// Its store and cluster/edsService can be updated separately. And it will
// update its internal perCluster store so that new stats will be added to the
// correct perCluster.
//
// Note that this struct is a temporary workaround before we implement graceful
// switch for EDS. Any update to the clusterName and serviceName is too early,
// the perfect timing is when the picker is updated with the new connection.
// This early update could cause picks for the old SubConn being reported to the
// new services.
//
// When the graceful switch in EDS is done, there should be no need for this
// struct. The policies that record/report load shouldn't need to handle update
// of lrsServerName/cluster/edsService. Its parent should do a graceful switch
// of the whole tree when one of that changes.
type Wrapper struct {
	// mu guards every field below. The Call* recording methods take the
	// read lock; the Update* methods take the write lock.
	mu         sync.RWMutex
	cluster    string
	edsService string
	// store and perCluster are initialized as nil. They are only set by the
	// balancer when LRS is enabled. Before that, all functions to record loads
	// are no-op.
	store      *load.Store
	perCluster load.PerClusterReporter
}
    59  
    60  // UpdateClusterAndService updates the cluster name and eds service for this
    61  // wrapper. If any one of them is changed from before, the perCluster store in
    62  // this wrapper will also be updated.
    63  func (lsw *Wrapper) UpdateClusterAndService(cluster, edsService string) {
    64  	lsw.mu.Lock()
    65  	defer lsw.mu.Unlock()
    66  	if cluster == lsw.cluster && edsService == lsw.edsService {
    67  		return
    68  	}
    69  	lsw.cluster = cluster
    70  	lsw.edsService = edsService
    71  	lsw.perCluster = lsw.store.PerCluster(lsw.cluster, lsw.edsService)
    72  }
    73  
    74  // UpdateLoadStore updates the load store for this wrapper. If it is changed
    75  // from before, the perCluster store in this wrapper will also be updated.
    76  func (lsw *Wrapper) UpdateLoadStore(store *load.Store) {
    77  	lsw.mu.Lock()
    78  	defer lsw.mu.Unlock()
    79  	if store == lsw.store {
    80  		return
    81  	}
    82  	lsw.store = store
    83  	lsw.perCluster = lsw.store.PerCluster(lsw.cluster, lsw.edsService)
    84  }
    85  
    86  // CallStarted records a call started in the store.
    87  func (lsw *Wrapper) CallStarted(locality string) {
    88  	lsw.mu.RLock()
    89  	defer lsw.mu.RUnlock()
    90  	if lsw.perCluster != nil {
    91  		lsw.perCluster.CallStarted(locality)
    92  	}
    93  }
    94  
    95  // CallFinished records a call finished in the store.
    96  func (lsw *Wrapper) CallFinished(locality string, err error) {
    97  	lsw.mu.RLock()
    98  	defer lsw.mu.RUnlock()
    99  	if lsw.perCluster != nil {
   100  		lsw.perCluster.CallFinished(locality, err)
   101  	}
   102  }
   103  
   104  // CallServerLoad records the server load in the store.
   105  func (lsw *Wrapper) CallServerLoad(locality, name string, val float64) {
   106  	lsw.mu.RLock()
   107  	defer lsw.mu.RUnlock()
   108  	if lsw.perCluster != nil {
   109  		lsw.perCluster.CallServerLoad(locality, name, val)
   110  	}
   111  }
   112  
   113  // CallDropped records a call dropped in the store.
   114  func (lsw *Wrapper) CallDropped(category string) {
   115  	lsw.mu.RLock()
   116  	defer lsw.mu.RUnlock()
   117  	if lsw.perCluster != nil {
   118  		lsw.perCluster.CallDropped(category)
   119  	}
   120  }