github.com/argoproj/argo-cd/v3@v3.2.1/controller/sharding/cache.go (about)

     1  package sharding
     2  
     3  import (
     4  	"sync"
     5  
     6  	log "github.com/sirupsen/logrus"
     7  
     8  	"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
     9  	"github.com/argoproj/argo-cd/v3/util/db"
    10  )
    11  
// ClusterShardingCache tracks the set of known clusters and applications and
// answers whether a given cluster should be processed by the current shard.
// ClusterSharding is the concrete implementation in this package.
type ClusterShardingCache interface {
	// Init replaces the cached cluster and application sets and recomputes the shard distribution.
	Init(clusters *v1alpha1.ClusterList, apps *v1alpha1.ApplicationList)
	// Add inserts (or replaces) a cluster, recomputing the distribution when sharding-relevant fields changed.
	Add(c *v1alpha1.Cluster)
	// Delete removes the cluster identified by its server URL and recomputes the distribution.
	Delete(clusterServer string)
	// Update replaces oldCluster with newCluster, handling server URL renames.
	Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster)
	// AddApp inserts (or replaces) an application, recomputing the distribution for new apps.
	AddApp(a *v1alpha1.Application)
	// DeleteApp removes an application by name and recomputes the distribution.
	DeleteApp(a *v1alpha1.Application)
	// UpdateApp replaces an application, recomputing the distribution only when the app was unknown.
	UpdateApp(a *v1alpha1.Application)
	// IsManagedCluster reports whether the cluster is assigned to this shard.
	IsManagedCluster(c *v1alpha1.Cluster) bool
	// GetDistribution returns a copy of the cluster server -> shard mapping.
	GetDistribution() map[string]int
	// GetAppDistribution returns the number of apps per destination cluster server.
	GetAppDistribution() map[string]int
	// UpdateShard sets the current shard number, returning true if it changed.
	UpdateShard(shard int) bool
}
    25  
// ClusterSharding maintains the assignment of clusters to controller shards.
type ClusterSharding struct {
	Shard           int                              // shard number handled by this controller replica
	Replicas        int                              // total number of controller replicas
	Shards          map[string]int                   // cluster server URL -> assigned shard
	Clusters        map[string]*v1alpha1.Cluster     // cluster server URL -> cluster
	Apps            map[string]*v1alpha1.Application // application name -> application
	lock            sync.RWMutex                     // guards Shards, Clusters and Apps
	getClusterShard DistributionFunction             // computes a shard for clusters without an explicit Shard field
}
    35  
    36  func NewClusterSharding(_ db.ArgoDB, shard, replicas int, shardingAlgorithm string) ClusterShardingCache {
    37  	log.Debugf("Processing clusters from shard %d: Using filter function:  %s", shard, shardingAlgorithm)
    38  	clusterSharding := &ClusterSharding{
    39  		Shard:    shard,
    40  		Replicas: replicas,
    41  		Shards:   make(map[string]int),
    42  		Clusters: make(map[string]*v1alpha1.Cluster),
    43  		Apps:     make(map[string]*v1alpha1.Application),
    44  	}
    45  	distributionFunction := NoShardingDistributionFunction()
    46  	if replicas > 1 {
    47  		log.Debugf("Processing clusters from shard %d: Using filter function:  %s", shard, shardingAlgorithm)
    48  		distributionFunction = GetDistributionFunction(clusterSharding.getClusterAccessor(), clusterSharding.getAppAccessor(), shardingAlgorithm, replicas)
    49  	} else {
    50  		log.Info("Processing all cluster shards")
    51  	}
    52  	clusterSharding.getClusterShard = distributionFunction
    53  	return clusterSharding
    54  }
    55  
    56  // IsManagedCluster returns whether or not the cluster should be processed by a given shard.
    57  func (sharding *ClusterSharding) IsManagedCluster(c *v1alpha1.Cluster) bool {
    58  	sharding.lock.RLock()
    59  	defer sharding.lock.RUnlock()
    60  	if c == nil { // nil cluster (in-cluster) is always managed by current clusterShard
    61  		return true
    62  	}
    63  	clusterShard := 0
    64  	if shard, ok := sharding.Shards[c.Server]; ok {
    65  		clusterShard = shard
    66  	} else {
    67  		log.Warnf("The cluster %s has no assigned shard.", c.Server)
    68  	}
    69  	log.Debugf("Checking if cluster %s with clusterShard %d should be processed by shard %d", c.Server, clusterShard, sharding.Shard)
    70  	return clusterShard == sharding.Shard
    71  }
    72  
    73  func (sharding *ClusterSharding) Init(clusters *v1alpha1.ClusterList, apps *v1alpha1.ApplicationList) {
    74  	sharding.lock.Lock()
    75  	defer sharding.lock.Unlock()
    76  	newClusters := make(map[string]*v1alpha1.Cluster, len(clusters.Items))
    77  	for _, c := range clusters.Items {
    78  		cluster := c
    79  		newClusters[c.Server] = &cluster
    80  	}
    81  	sharding.Clusters = newClusters
    82  
    83  	newApps := make(map[string]*v1alpha1.Application, len(apps.Items))
    84  	for i := range apps.Items {
    85  		app := apps.Items[i]
    86  		newApps[app.Name] = &app
    87  	}
    88  	sharding.Apps = newApps
    89  	sharding.updateDistribution()
    90  }
    91  
    92  func (sharding *ClusterSharding) Add(c *v1alpha1.Cluster) {
    93  	sharding.lock.Lock()
    94  	defer sharding.lock.Unlock()
    95  
    96  	old, ok := sharding.Clusters[c.Server]
    97  	sharding.Clusters[c.Server] = c
    98  	if !ok || hasShardingUpdates(old, c) {
    99  		sharding.updateDistribution()
   100  	} else {
   101  		log.Debugf("Skipping sharding distribution update. Cluster already added")
   102  	}
   103  }
   104  
   105  func (sharding *ClusterSharding) Delete(clusterServer string) {
   106  	sharding.lock.Lock()
   107  	defer sharding.lock.Unlock()
   108  	if _, ok := sharding.Clusters[clusterServer]; ok {
   109  		delete(sharding.Clusters, clusterServer)
   110  		delete(sharding.Shards, clusterServer)
   111  		sharding.updateDistribution()
   112  	}
   113  }
   114  
   115  func (sharding *ClusterSharding) Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster) {
   116  	sharding.lock.Lock()
   117  	defer sharding.lock.Unlock()
   118  
   119  	if _, ok := sharding.Clusters[oldCluster.Server]; ok && oldCluster.Server != newCluster.Server {
   120  		delete(sharding.Clusters, oldCluster.Server)
   121  		delete(sharding.Shards, oldCluster.Server)
   122  	}
   123  	sharding.Clusters[newCluster.Server] = newCluster
   124  	if hasShardingUpdates(oldCluster, newCluster) {
   125  		sharding.updateDistribution()
   126  	} else {
   127  		log.Debugf("Skipping sharding distribution update. No relevant changes")
   128  	}
   129  }
   130  
   131  func (sharding *ClusterSharding) GetDistribution() map[string]int {
   132  	sharding.lock.RLock()
   133  	defer sharding.lock.RUnlock()
   134  	shards := sharding.Shards
   135  
   136  	distribution := make(map[string]int, len(shards))
   137  	for k, v := range shards {
   138  		distribution[k] = v
   139  	}
   140  	return distribution
   141  }
   142  
   143  func (sharding *ClusterSharding) updateDistribution() {
   144  	for k, c := range sharding.Clusters {
   145  		shard := 0
   146  		if c.Shard != nil {
   147  			requestedShard := int(*c.Shard)
   148  			if requestedShard < sharding.Replicas {
   149  				shard = requestedShard
   150  			} else {
   151  				log.Warnf("Specified cluster shard (%d) for cluster: %s is greater than the number of available shard (%d). Using shard 0.", requestedShard, c.Server, sharding.Replicas)
   152  			}
   153  		} else {
   154  			shard = sharding.getClusterShard(c)
   155  		}
   156  
   157  		existingShard, ok := sharding.Shards[k]
   158  		switch {
   159  		case ok && existingShard != shard:
   160  			log.Infof("Cluster %s has changed shard from %d to %d", k, existingShard, shard)
   161  		case !ok:
   162  			log.Infof("Cluster %s has been assigned to shard %d", k, shard)
   163  		default:
   164  			log.Debugf("Cluster %s has not changed shard", k)
   165  		}
   166  		sharding.Shards[k] = shard
   167  	}
   168  }
   169  
   170  // hasShardingUpdates returns true if the sharding distribution has explicitly changed
   171  func hasShardingUpdates(old, newCluster *v1alpha1.Cluster) bool {
   172  	if old == nil || newCluster == nil {
   173  		return false
   174  	}
   175  
   176  	// returns true if the cluster id has changed because some sharding algorithms depend on it.
   177  	if old.ID != newCluster.ID {
   178  		return true
   179  	}
   180  
   181  	if old.Server != newCluster.Server {
   182  		return true
   183  	}
   184  
   185  	// return false if the shard field has not been modified
   186  	if old.Shard == nil && newCluster.Shard == nil {
   187  		return false
   188  	}
   189  	return old.Shard == nil || newCluster.Shard == nil || int64(*old.Shard) != int64(*newCluster.Shard)
   190  }
   191  
   192  // A read lock should be acquired before calling getClusterAccessor.
   193  func (sharding *ClusterSharding) getClusterAccessor() clusterAccessor {
   194  	return func() []*v1alpha1.Cluster {
   195  		// no need to lock, as this is only called from the updateDistribution function
   196  		clusters := make([]*v1alpha1.Cluster, 0, len(sharding.Clusters))
   197  		for _, c := range sharding.Clusters {
   198  			clusters = append(clusters, c)
   199  		}
   200  		return clusters
   201  	}
   202  }
   203  
   204  // A read lock should be acquired before calling getAppAccessor.
   205  func (sharding *ClusterSharding) getAppAccessor() appAccessor {
   206  	return func() []*v1alpha1.Application {
   207  		apps := make([]*v1alpha1.Application, 0, len(sharding.Apps))
   208  		for _, a := range sharding.Apps {
   209  			apps = append(apps, a)
   210  		}
   211  		return apps
   212  	}
   213  }
   214  
   215  func (sharding *ClusterSharding) AddApp(a *v1alpha1.Application) {
   216  	sharding.lock.Lock()
   217  	defer sharding.lock.Unlock()
   218  
   219  	_, ok := sharding.Apps[a.Name]
   220  	sharding.Apps[a.Name] = a
   221  	if !ok {
   222  		sharding.updateDistribution()
   223  	} else {
   224  		log.Debugf("Skipping sharding distribution update. App already added")
   225  	}
   226  }
   227  
   228  func (sharding *ClusterSharding) DeleteApp(a *v1alpha1.Application) {
   229  	sharding.lock.Lock()
   230  	defer sharding.lock.Unlock()
   231  	if _, ok := sharding.Apps[a.Name]; ok {
   232  		delete(sharding.Apps, a.Name)
   233  		sharding.updateDistribution()
   234  	}
   235  }
   236  
   237  func (sharding *ClusterSharding) UpdateApp(a *v1alpha1.Application) {
   238  	sharding.lock.Lock()
   239  	defer sharding.lock.Unlock()
   240  
   241  	_, ok := sharding.Apps[a.Name]
   242  	sharding.Apps[a.Name] = a
   243  	if !ok {
   244  		sharding.updateDistribution()
   245  	} else {
   246  		log.Debugf("Skipping sharding distribution update. No relevant changes")
   247  	}
   248  }
   249  
   250  // GetAppDistribution should be not be called from a DestributionFunction because
   251  // it could cause a deadlock when updateDistribution is called.
   252  func (sharding *ClusterSharding) GetAppDistribution() map[string]int {
   253  	sharding.lock.RLock()
   254  	clusters := sharding.Clusters
   255  	apps := sharding.Apps
   256  	sharding.lock.RUnlock()
   257  
   258  	appDistribution := make(map[string]int, len(clusters))
   259  
   260  	for _, a := range apps {
   261  		if _, ok := appDistribution[a.Spec.Destination.Server]; !ok {
   262  			appDistribution[a.Spec.Destination.Server] = 0
   263  		}
   264  		appDistribution[a.Spec.Destination.Server]++
   265  	}
   266  	return appDistribution
   267  }
   268  
   269  // UpdateShard will update the shard of ClusterSharding when the shard has changed.
   270  func (sharding *ClusterSharding) UpdateShard(shard int) bool {
   271  	if shard != sharding.Shard {
   272  		sharding.lock.RLock()
   273  		sharding.Shard = shard
   274  		sharding.lock.RUnlock()
   275  		return true
   276  	}
   277  	return false
   278  }