github.com/polarismesh/polaris@v1.17.8/cache/service/ratelimit_config.go (about)

     1  /**
     2   * Tencent is pleased to support the open source community by making Polaris available.
     3   *
     4   * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
     5   *
     6   * Licensed under the BSD 3-Clause License (the "License");
     7   * you may not use this file except in compliance with the License.
     8   * You may obtain a copy of the License at
     9   *
    10   * https://opensource.org/licenses/BSD-3-Clause
    11   *
    12   * Unless required by applicable law or agreed to in writing, software distributed
    13   * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
    14   * CONDITIONS OF ANY KIND, either express or implied. See the License for the
    15   * specific language governing permissions and limitations under the License.
    16   */
    17  
    18  package service
    19  
    20  import (
    21  	"encoding/json"
    22  	"sync"
    23  	"time"
    24  
    25  	apitraffic "github.com/polarismesh/specification/source/go/api/v1/traffic_manage"
    26  	"golang.org/x/sync/singleflight"
    27  	"google.golang.org/protobuf/types/known/wrapperspb"
    28  
    29  	types "github.com/polarismesh/polaris/cache/api"
    30  	"github.com/polarismesh/polaris/common/model"
    31  	"github.com/polarismesh/polaris/common/utils"
    32  	"github.com/polarismesh/polaris/store"
    33  )
    34  
// rateLimitCache is the concrete implementation of the rate-limit rule cache.
type rateLimitCache struct {
	*types.BaseCache

	lock         sync.RWMutex         // guards waitFixRules
	waitFixRules map[string]struct{}  // IDs of rules whose service namespace/name still need back-filling
	svcCache     types.ServiceCache   // service cache used to resolve service IDs to namespace/name
	storage      store.Store          // persistent store, fallback when the service cache misses
	rules        *rateLimitRuleBucket // in-memory bucket of all cached rate-limit rules
	singleFlight singleflight.Group   // collapses concurrent Update calls into one refresh
}
    46  
    47  // NewRateLimitCache 返回一个操作RateLimitCache的对象
    48  func NewRateLimitCache(s store.Store, cacheMgr types.CacheManager) types.RateLimitCache {
    49  	return &rateLimitCache{
    50  		BaseCache:    types.NewBaseCache(s, cacheMgr),
    51  		storage:      s,
    52  		waitFixRules: map[string]struct{}{},
    53  	}
    54  }
    55  
    56  // Initialize 实现Cache接口的initialize函数
    57  func (rlc *rateLimitCache) Initialize(_ map[string]interface{}) error {
    58  	rlc.rules = newRateLimitRuleBucket()
    59  	rlc.svcCache = rlc.CacheMgr.GetCacher(types.CacheService).(*serviceCache)
    60  	return nil
    61  }
    62  
    63  // Update 实现Cache接口的update函数
    64  func (rlc *rateLimitCache) Update() error {
    65  	// 多个线程竞争,只有一个线程进行更新
    66  	_, err, _ := rlc.singleFlight.Do(rlc.Name(), func() (interface{}, error) {
    67  		return nil, rlc.DoCacheUpdate(rlc.Name(), rlc.realUpdate)
    68  	})
    69  
    70  	return err
    71  }
    72  
    73  func (rlc *rateLimitCache) realUpdate() (map[string]time.Time, int64, error) {
    74  	rateLimits, err := rlc.storage.GetRateLimitsForCache(rlc.LastFetchTime(), rlc.IsFirstUpdate())
    75  	if err != nil {
    76  		log.Errorf("[Cache] rate limit cache update err: %s", err.Error())
    77  		return nil, -1, err
    78  	}
    79  	rlc.setRateLimit(rateLimits)
    80  	return nil, int64(len(rateLimits)), err
    81  }
    82  
    83  // Name 获取资源名称
    84  func (rlc *rateLimitCache) Name() string {
    85  	return types.RateLimitConfigName
    86  }
    87  
    88  // Clear 实现Cache接口的clear函数
    89  func (rlc *rateLimitCache) Clear() error {
    90  	rlc.BaseCache.Clear()
    91  	rlc.rules = newRateLimitRuleBucket()
    92  	return nil
    93  }
    94  
    95  func (rlc *rateLimitCache) rateLimitToProto(rateLimit *model.RateLimit) error {
    96  	rateLimit.Proto = &apitraffic.Rule{}
    97  	if len(rateLimit.Rule) == 0 {
    98  		return nil
    99  	}
   100  	// 反序列化rule
   101  	if err := json.Unmarshal([]byte(rateLimit.Rule), rateLimit.Proto); err != nil {
   102  		return err
   103  	}
   104  	rateLimit.Proto.Disable = wrapperspb.Bool(rateLimit.Disable)
   105  	namespace := rateLimit.Proto.GetNamespace().GetValue()
   106  	name := rateLimit.Proto.GetService().GetValue()
   107  	if namespace == "" || name == "" {
   108  		rlc.fixRuleServiceInfo(rateLimit)
   109  	}
   110  	return rateLimit.AdaptArgumentsAndLabels()
   111  }
   112  
   113  // setRateLimit 更新限流规则到缓存中
   114  func (rlc *rateLimitCache) setRateLimit(rateLimits []*model.RateLimit) map[string]time.Time {
   115  	if len(rateLimits) == 0 {
   116  		return nil
   117  	}
   118  	rlc.fixRulesServiceInfo()
   119  	updateService := map[model.ServiceKey]struct{}{}
   120  	lastMtime := rlc.LastMtime(rlc.Name()).Unix()
   121  	for _, item := range rateLimits {
   122  		if err := rlc.rateLimitToProto(item); nil != err {
   123  			log.Errorf("[Cache]fail to unmarshal rule to proto, err: %v", err)
   124  			continue
   125  		}
   126  		if item.ModifyTime.Unix() > lastMtime {
   127  			lastMtime = item.ModifyTime.Unix()
   128  		}
   129  
   130  		key := model.ServiceKey{
   131  			Namespace: item.Proto.GetNamespace().GetValue(),
   132  			Name:      item.Proto.GetService().GetValue(),
   133  		}
   134  		updateService[key] = struct{}{}
   135  
   136  		// 待删除的rateLimit
   137  		if !item.Valid {
   138  			rlc.rules.delRule(item)
   139  			rlc.deleteWaitFixRule(item)
   140  			continue
   141  		}
   142  		rlc.rules.saveRule(item)
   143  	}
   144  
   145  	for serviceKey := range updateService {
   146  		rlc.rules.reloadRevision(serviceKey)
   147  	}
   148  
   149  	return map[string]time.Time{
   150  		rlc.Name(): time.Unix(lastMtime, 0),
   151  	}
   152  }
   153  
   154  // IteratorRateLimit 根据serviceID进行迭代回调
   155  func (rlc *rateLimitCache) IteratorRateLimit(proc types.RateLimitIterProc) {
   156  	rlc.rules.foreach(proc)
   157  }
   158  
   159  // GetRateLimitByServiceID 根据serviceID获取限流数据
   160  func (rlc *rateLimitCache) GetRateLimitRules(serviceKey model.ServiceKey) ([]*model.RateLimit, string) {
   161  	rules, revision := rlc.rules.getRules(serviceKey)
   162  	return rules, revision
   163  }
   164  
   165  // GetRateLimitsCount 获取限流规则总数
   166  func (rlc *rateLimitCache) GetRateLimitsCount() int {
   167  	return rlc.rules.count()
   168  }
   169  
   170  func (rlc *rateLimitCache) deleteWaitFixRule(rule *model.RateLimit) {
   171  	rlc.lock.Lock()
   172  	defer rlc.lock.Unlock()
   173  	delete(rlc.waitFixRules, rule.ID)
   174  }
   175  
   176  func (rlc *rateLimitCache) fixRulesServiceInfo() {
   177  	rlc.lock.Lock()
   178  	defer rlc.lock.Unlock()
   179  	for id := range rlc.waitFixRules {
   180  		rule := rlc.rules.getRuleByID(id)
   181  		if rule == nil {
   182  			delete(rlc.waitFixRules, id)
   183  			continue
   184  		}
   185  		svcId := rule.ServiceID
   186  		svc := rlc.svcCache.GetServiceByID(svcId)
   187  		if svc == nil {
   188  			svc2, err := rlc.storage.GetServiceByID(svcId)
   189  			if err != nil {
   190  				continue
   191  			}
   192  			svc = svc2
   193  		}
   194  
   195  		rule.Proto.Namespace = utils.NewStringValue(svc.Namespace)
   196  		rule.Proto.Name = utils.NewStringValue(svc.Name)
   197  		delete(rlc.waitFixRules, rule.ID)
   198  	}
   199  }
   200  
   201  func (rlc *rateLimitCache) fixRuleServiceInfo(rateLimit *model.RateLimit) {
   202  	rlc.lock.Lock()
   203  	defer rlc.lock.Unlock()
   204  	svcId := rateLimit.ServiceID
   205  	svc := rlc.svcCache.GetServiceByID(svcId)
   206  	if svc == nil {
   207  		svc2, err := rlc.storage.GetServiceByID(svcId)
   208  		if err != nil {
   209  			rlc.waitFixRules[rateLimit.ID] = struct{}{}
   210  			return
   211  		}
   212  		if svc2 == nil {
   213  			// 存储层确实不存在,直接跳过
   214  			delete(rlc.waitFixRules, rateLimit.ID)
   215  			return
   216  		}
   217  		svc = svc2
   218  	}
   219  
   220  	rateLimit.Proto.Namespace = utils.NewStringValue(svc.Namespace)
   221  	rateLimit.Proto.Name = utils.NewStringValue(svc.Name)
   222  	delete(rlc.waitFixRules, rateLimit.ID)
   223  }