github.com/matrixorigin/matrixone@v1.2.0/pkg/proxy/scaling.go

// Copyright 2021 - 2023 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package proxy

import (
	"context"
	"time"

	"github.com/matrixorigin/matrixone/pkg/clusterservice"
	"github.com/matrixorigin/matrixone/pkg/common/log"
	"github.com/matrixorigin/matrixone/pkg/pb/metadata"
	v2 "github.com/matrixorigin/matrixone/pkg/util/metric/v2"
	"go.uber.org/zap"
)

const defaultScalingInterval = 3 * time.Second

type scaling struct {
	logger *log.MOLogger
	// interval is how often the scaling state is checked. The default
	// value is defaultScalingInterval.
	interval time.Duration
	// disabled indicates whether the scaling worker is disabled; it is
	// the same flag that the rebalancer uses.
	disabled bool
	// connManager provides the connection information.
	connManager *connManager
	// mc is the MOCluster instance, which is used to get CN servers.
	mc clusterservice.MOCluster
	// queue is the transfer queue; it is the same queue that the
	// rebalancer uses.
	queue chan *tunnel
}
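
// newScaling creates the scaling worker, which periodically moves
// client connections off CN servers that are draining.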
    44  
    45  func newScaling(
    46  	cm *connManager, queue chan *tunnel, mc clusterservice.MOCluster, logger *log.MOLogger, disabled bool,
    47  ) *scaling {
    48  	return &scaling{
    49  		interval:    defaultScalingInterval,
    50  		logger:      logger,
    51  		disabled:    disabled,
    52  		connManager: cm,
    53  		queue:       queue,
    54  		mc:          mc,
    55  	}
    56  }
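
// run starts the scaling loop. On every tick of s.interval it scans the
// cluster via doScaling, and it returns once ctx is canceled.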
func (s *scaling) run(ctx context.Context) {
	ticker := time.NewTicker(s.interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			s.doScaling()
		case <-ctx.Done():
			s.logger.Info("scaling runner ended")
			return
		}
	}
}
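
// A minimal sketch of how a caller might wire up and stop the worker
// (cm, queue, mc, and logger are placeholders assumed to exist in the
// caller's scope; they are not defined in this file):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	sc := newScaling(cm, queue, mc, logger, false)
//	go sc.run(ctx)
//	// ...
//	cancel() // stops the run loop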
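
// doScaling collects the IDs of all draining CN servers and puts every
// tunnel that is currently connected to one of them into the transfer
// queue. It is a no-op when the worker is disabled.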
func (s *scaling) doScaling() {
	if s.disabled {
		return
	}
	drainingCNs := make([]string, 0, 100)
	s.mc.GetCNService(clusterservice.NewSelectAll(), func(cn metadata.CNService) bool {
		if isDraining(cn) {
			drainingCNs = append(drainingCNs, cn.ServiceID)
		}
		return true
	})
	v2.ProxyDrainCounter.Add(float64(len(drainingCNs)))
	for _, cn := range drainingCNs {
		tuns := s.connManager.getTunnelsByCNID(cn)
		tunNum := len(tuns)
		if tunNum == 0 {
			continue
		}
		s.logger.Info("transferring tunnels on CN",
			zap.Int("tunnel number", tunNum),
			zap.String("CN ID", cn),
		)
		for _, tun := range tuns {
			tun.setTransferType(transferByScaling)
			select {
			case s.queue <- tun:
			default:
				// The queue is full; reset the transfer type to its
				// default value so the tunnel can be retried on a
				// later tick.
				tun.setTransferType(transferByRebalance)
				s.logger.Info("rebalance queue is full")
			}
		}
	}
}
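
// isDraining reports whether the CN service is in the Draining work state.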
func isDraining(s metadata.CNService) bool {
	return s.WorkState == metadata.WorkState_Draining
}
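
// For illustration, CN services constructed with inline work states
// (these literals are examples, not part of the original file):
//
//	isDraining(metadata.CNService{WorkState: metadata.WorkState_Draining}) // true
//	isDraining(metadata.CNService{WorkState: metadata.WorkState_Working})  // false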