github.com/argoproj/argo-cd/v3@v3.2.1/controller/cache/cache_test.go (about)

     1  package cache
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"net"
     7  	"net/url"
     8  	"sync"
     9  	"testing"
    10  	"time"
    11  
    12  	corev1 "k8s.io/api/core/v1"
    13  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    14  
    15  	"github.com/stretchr/testify/assert"
    16  	"github.com/stretchr/testify/require"
    17  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    18  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    19  	"k8s.io/apimachinery/pkg/runtime/schema"
    20  
    21  	"github.com/argoproj/gitops-engine/pkg/cache"
    22  	"github.com/argoproj/gitops-engine/pkg/cache/mocks"
    23  	"github.com/argoproj/gitops-engine/pkg/health"
    24  	"github.com/argoproj/gitops-engine/pkg/utils/kube"
    25  	"github.com/stretchr/testify/mock"
    26  	"k8s.io/client-go/kubernetes/fake"
    27  
    28  	"github.com/argoproj/argo-cd/v3/common"
    29  	"github.com/argoproj/argo-cd/v3/controller/metrics"
    30  	"github.com/argoproj/argo-cd/v3/controller/sharding"
    31  	"github.com/argoproj/argo-cd/v3/pkg/apis/application"
    32  	appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
    33  	dbmocks "github.com/argoproj/argo-cd/v3/util/db/mocks"
    34  	argosettings "github.com/argoproj/argo-cd/v3/util/settings"
    35  )
    36  
// netError is a string-backed error type used by these tests to simulate
// network-level failures. It satisfies the net.Error interface (Error,
// Timeout, Temporary) so it can be fed to code paths that type-assert on
// net.Error, e.g. isRetryableError.
type netError string

// Error returns the underlying string as the error message.
func (n netError) Error() string { return string(n) }

// Timeout always reports false; these simulated errors are not timeouts.
func (n netError) Timeout() bool { return false }

// Temporary always reports false; these simulated errors are not temporary.
func (n netError) Temporary() bool { return false }
    42  
    43  func fixtures(data map[string]string, opts ...func(secret *corev1.Secret)) (*fake.Clientset, *argosettings.SettingsManager) {
    44  	cm := &corev1.ConfigMap{
    45  		ObjectMeta: metav1.ObjectMeta{
    46  			Name:      common.ArgoCDConfigMapName,
    47  			Namespace: "default",
    48  			Labels: map[string]string{
    49  				"app.kubernetes.io/part-of": "argocd",
    50  			},
    51  		},
    52  		Data: data,
    53  	}
    54  	secret := &corev1.Secret{
    55  		ObjectMeta: metav1.ObjectMeta{
    56  			Name:      common.ArgoCDSecretName,
    57  			Namespace: "default",
    58  			Labels: map[string]string{
    59  				"app.kubernetes.io/part-of": "argocd",
    60  			},
    61  		},
    62  		Data: map[string][]byte{},
    63  	}
    64  	for i := range opts {
    65  		opts[i](secret)
    66  	}
    67  	kubeClient := fake.NewClientset(cm, secret)
    68  	settingsManager := argosettings.NewSettingsManager(context.Background(), kubeClient, "default")
    69  
    70  	return kubeClient, settingsManager
    71  }
    72  
    73  func TestHandleModEvent_HasChanges(_ *testing.T) {
    74  	clusterCache := &mocks.ClusterCache{}
    75  	clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
    76  	clusterCache.On("EnsureSynced").Return(nil).Once()
    77  	db := &dbmocks.ArgoDB{}
    78  	db.On("GetApplicationControllerReplicas").Return(1)
    79  	clustersCache := liveStateCache{
    80  		clusters: map[string]cache.ClusterCache{
    81  			"https://mycluster": clusterCache,
    82  		},
    83  		clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
    84  	}
    85  
    86  	clustersCache.handleModEvent(&appv1.Cluster{
    87  		Server: "https://mycluster",
    88  		Config: appv1.ClusterConfig{Username: "foo"},
    89  	}, &appv1.Cluster{
    90  		Server:     "https://mycluster",
    91  		Config:     appv1.ClusterConfig{Username: "bar"},
    92  		Namespaces: []string{"default"},
    93  	})
    94  }
    95  
    96  func TestHandleModEvent_ClusterExcluded(t *testing.T) {
    97  	clusterCache := &mocks.ClusterCache{}
    98  	clusterCache.On("Invalidate", mock.Anything, mock.Anything).Return(nil).Once()
    99  	clusterCache.On("EnsureSynced").Return(nil).Once()
   100  	db := &dbmocks.ArgoDB{}
   101  	db.On("GetApplicationControllerReplicas").Return(1)
   102  	clustersCache := liveStateCache{
   103  		db:          nil,
   104  		appInformer: nil,
   105  		onObjectUpdated: func(_ map[string]bool, _ corev1.ObjectReference) {
   106  		},
   107  		settingsMgr:   &argosettings.SettingsManager{},
   108  		metricsServer: &metrics.MetricsServer{},
   109  		// returns a shard that never process any cluster
   110  		clusterSharding:  sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
   111  		resourceTracking: nil,
   112  		clusters:         map[string]cache.ClusterCache{"https://mycluster": clusterCache},
   113  		cacheSettings:    cacheSettings{},
   114  		lock:             sync.RWMutex{},
   115  	}
   116  
   117  	clustersCache.handleModEvent(&appv1.Cluster{
   118  		Server: "https://mycluster",
   119  		Config: appv1.ClusterConfig{Username: "foo"},
   120  	}, &appv1.Cluster{
   121  		Server:     "https://mycluster",
   122  		Config:     appv1.ClusterConfig{Username: "bar"},
   123  		Namespaces: []string{"default"},
   124  	})
   125  
   126  	assert.Len(t, clustersCache.clusters, 1)
   127  }
   128  
   129  func TestHandleModEvent_NoChanges(_ *testing.T) {
   130  	clusterCache := &mocks.ClusterCache{}
   131  	clusterCache.On("Invalidate", mock.Anything).Panic("should not invalidate")
   132  	clusterCache.On("EnsureSynced").Return(nil).Panic("should not re-sync")
   133  	db := &dbmocks.ArgoDB{}
   134  	db.On("GetApplicationControllerReplicas").Return(1)
   135  	clustersCache := liveStateCache{
   136  		clusters: map[string]cache.ClusterCache{
   137  			"https://mycluster": clusterCache,
   138  		},
   139  		clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
   140  	}
   141  
   142  	clustersCache.handleModEvent(&appv1.Cluster{
   143  		Server: "https://mycluster",
   144  		Config: appv1.ClusterConfig{Username: "bar"},
   145  	}, &appv1.Cluster{
   146  		Server: "https://mycluster",
   147  		Config: appv1.ClusterConfig{Username: "bar"},
   148  	})
   149  }
   150  
   151  func TestHandleAddEvent_ClusterExcluded(t *testing.T) {
   152  	db := &dbmocks.ArgoDB{}
   153  	db.On("GetApplicationControllerReplicas").Return(1)
   154  	clustersCache := liveStateCache{
   155  		clusters:        map[string]cache.ClusterCache{},
   156  		clusterSharding: sharding.NewClusterSharding(db, 0, 2, common.DefaultShardingAlgorithm),
   157  	}
   158  	clustersCache.handleAddEvent(&appv1.Cluster{
   159  		Server: "https://mycluster",
   160  		Config: appv1.ClusterConfig{Username: "bar"},
   161  	})
   162  
   163  	assert.Empty(t, clustersCache.clusters)
   164  }
   165  
// TestHandleDeleteEvent_CacheDeadlock reproduces a historical deadlock
// scenario between two locks: gitops-engine's EnsureSynced holds the engine
// cache lock and then needs the liveStateCache lock, while handleDeleteEvent
// holds the liveStateCache lock and (via cluster.Invalidate) needs the
// engine lock. The test orchestrates both paths concurrently with helper
// mutexes and fails after a 5s timeout if they ever deadlock.
func TestHandleDeleteEvent_CacheDeadlock(t *testing.T) {
	testCluster := &appv1.Cluster{
		Server: "https://mycluster",
		Config: appv1.ClusterConfig{Username: "bar"},
	}
	db := &dbmocks.ArgoDB{}
	db.On("GetApplicationControllerReplicas").Return(1)
	fakeClient := fake.NewClientset()
	settingsMgr := argosettings.NewSettingsManager(t.Context(), fakeClient, "argocd")
	liveStateCacheLock := sync.RWMutex{}
	gitopsEngineClusterCache := &mocks.ClusterCache{}
	clustersCache := liveStateCache{
		clusters: map[string]cache.ClusterCache{
			testCluster.Server: gitopsEngineClusterCache,
		},
		clusterSharding: sharding.NewClusterSharding(db, 0, 1, common.DefaultShardingAlgorithm),
		settingsMgr:     settingsMgr,
		// Set the lock here so we can reference it later
		//nolint:govet // We need to overwrite here to have access to the lock
		lock: liveStateCacheLock,
	}
	channel := make(chan string)
	// Mocked lock held by the gitops-engine cluster cache
	gitopsEngineClusterCacheLock := sync.Mutex{}
	// Ensure completion of both EnsureSynced and Invalidate
	ensureSyncedCompleted := sync.Mutex{}
	invalidateCompleted := sync.Mutex{}
	// Locks to force trigger condition during test
	// Condition order:
	//   EnsuredSynced -> Locks gitops-engine
	//   handleDeleteEvent -> Locks liveStateCache
	//   EnsureSynced via sync, newResource, populateResourceInfoHandler -> attempts to Lock liveStateCache
	//   handleDeleteEvent via cluster.Invalidate -> attempts to Lock gitops-engine
	handleDeleteWasCalled := sync.Mutex{}
	engineHoldsEngineLock := sync.Mutex{}
	// Pre-lock the coordination mutexes so each side blocks until its
	// counterpart explicitly unlocks the corresponding mutex below.
	ensureSyncedCompleted.Lock()
	invalidateCompleted.Lock()
	handleDeleteWasCalled.Lock()
	engineHoldsEngineLock.Lock()

	gitopsEngineClusterCache.On("EnsureSynced").Run(func(_ mock.Arguments) {
		gitopsEngineClusterCacheLock.Lock()
		t.Log("EnsureSynced: Engine has engine lock")
		engineHoldsEngineLock.Unlock()
		defer gitopsEngineClusterCacheLock.Unlock()
		// Wait until handleDeleteEvent holds the liveStateCache lock
		handleDeleteWasCalled.Lock()
		// Try and obtain the liveStateCache lock
		clustersCache.lock.Lock()
		t.Log("EnsureSynced: Engine has LiveStateCache lock")
		clustersCache.lock.Unlock()
		ensureSyncedCompleted.Unlock()
	}).Return(nil).Once()

	gitopsEngineClusterCache.On("Invalidate").Run(func(_ mock.Arguments) {
		// Allow EnsureSynced to continue now that we're in the deadlock condition
		handleDeleteWasCalled.Unlock()
		// Wait until gitops engine holds the gitops lock
		// This prevents timing issues if we reach this point before EnsureSynced has obtained the lock
		engineHoldsEngineLock.Lock()
		t.Log("Invalidate: Engine has engine lock")
		engineHoldsEngineLock.Unlock()
		// Lock engine lock
		gitopsEngineClusterCacheLock.Lock()
		t.Log("Invalidate: Invalidate has engine lock")
		gitopsEngineClusterCacheLock.Unlock()
		invalidateCompleted.Unlock()
	}).Return()
	go func() {
		// Start the gitops-engine lock holds
		go func() {
			err := gitopsEngineClusterCache.EnsureSynced()
			if err != nil {
				assert.Fail(t, err.Error())
			}
		}()
		// Run in background
		go clustersCache.handleDeleteEvent(testCluster.Server)
		// Allow execution to continue on clusters cache call to trigger lock
		ensureSyncedCompleted.Lock()
		invalidateCompleted.Lock()
		t.Log("Competing functions were able to obtain locks")
		invalidateCompleted.Unlock()
		ensureSyncedCompleted.Unlock()
		channel <- "PASSED"
	}()
	select {
	case str := <-channel:
		assert.Equal(t, "PASSED", str, str)
	case <-time.After(5 * time.Second):
		assert.Fail(t, "Ended up in deadlock")
	}
}
   259  
   260  func TestIsRetryableError(t *testing.T) {
   261  	var (
   262  		tlsHandshakeTimeoutErr net.Error = netError("net/http: TLS handshake timeout")
   263  		ioTimeoutErr           net.Error = netError("i/o timeout")
   264  		connectionTimedout     net.Error = netError("connection timed out")
   265  		connectionReset        net.Error = netError("connection reset by peer")
   266  	)
   267  	t.Run("Nil", func(t *testing.T) {
   268  		assert.False(t, isRetryableError(nil))
   269  	})
   270  	t.Run("ResourceQuotaConflictErr", func(t *testing.T) {
   271  		assert.False(t, isRetryableError(apierrors.NewConflict(schema.GroupResource{}, "", nil)))
   272  		assert.True(t, isRetryableError(apierrors.NewConflict(schema.GroupResource{Group: "v1", Resource: "resourcequotas"}, "", nil)))
   273  	})
   274  	t.Run("ExceededQuotaErr", func(t *testing.T) {
   275  		assert.False(t, isRetryableError(apierrors.NewForbidden(schema.GroupResource{}, "", nil)))
   276  		assert.True(t, isRetryableError(apierrors.NewForbidden(schema.GroupResource{Group: "v1", Resource: "pods"}, "", errors.New("exceeded quota"))))
   277  	})
   278  	t.Run("TooManyRequestsDNS", func(t *testing.T) {
   279  		assert.True(t, isRetryableError(apierrors.NewTooManyRequests("", 0)))
   280  	})
   281  	t.Run("DNSError", func(t *testing.T) {
   282  		assert.True(t, isRetryableError(&net.DNSError{}))
   283  	})
   284  	t.Run("OpError", func(t *testing.T) {
   285  		assert.True(t, isRetryableError(&net.OpError{}))
   286  	})
   287  	t.Run("UnknownNetworkError", func(t *testing.T) {
   288  		assert.True(t, isRetryableError(net.UnknownNetworkError("")))
   289  	})
   290  	t.Run("ConnectionClosedErr", func(t *testing.T) {
   291  		assert.False(t, isRetryableError(&url.Error{Err: errors.New("")}))
   292  		assert.True(t, isRetryableError(&url.Error{Err: errors.New("Connection closed by foreign host")}))
   293  	})
   294  	t.Run("TLSHandshakeTimeout", func(t *testing.T) {
   295  		assert.True(t, isRetryableError(tlsHandshakeTimeoutErr))
   296  	})
   297  	t.Run("IOHandshakeTimeout", func(t *testing.T) {
   298  		assert.True(t, isRetryableError(ioTimeoutErr))
   299  	})
   300  	t.Run("ConnectionTimeout", func(t *testing.T) {
   301  		assert.True(t, isRetryableError(connectionTimedout))
   302  	})
   303  	t.Run("ConnectionReset", func(t *testing.T) {
   304  		assert.True(t, isRetryableError(connectionReset))
   305  	})
   306  }
   307  
   308  func Test_asResourceNode_owner_refs(t *testing.T) {
   309  	resNode := asResourceNode(&cache.Resource{
   310  		ResourceVersion: "",
   311  		Ref: corev1.ObjectReference{
   312  			APIVersion: "v1",
   313  		},
   314  		OwnerRefs: []metav1.OwnerReference{
   315  			{
   316  				APIVersion: "v1",
   317  				Kind:       "ConfigMap",
   318  				Name:       "cm-1",
   319  			},
   320  			{
   321  				APIVersion: "v1",
   322  				Kind:       "ConfigMap",
   323  				Name:       "cm-2",
   324  			},
   325  		},
   326  		CreationTimestamp: nil,
   327  		Info:              nil,
   328  		Resource:          nil,
   329  	})
   330  	expected := appv1.ResourceNode{
   331  		ResourceRef: appv1.ResourceRef{
   332  			Version: "v1",
   333  		},
   334  		ParentRefs: []appv1.ResourceRef{
   335  			{
   336  				Group:   "",
   337  				Kind:    "ConfigMap",
   338  				Version: "v1",
   339  				Name:    "cm-1",
   340  			},
   341  			{
   342  				Group:   "",
   343  				Kind:    "ConfigMap",
   344  				Version: "v1",
   345  				Name:    "cm-2",
   346  			},
   347  		},
   348  		Info:            nil,
   349  		NetworkingInfo:  nil,
   350  		ResourceVersion: "",
   351  		Images:          nil,
   352  		Health:          nil,
   353  		CreatedAt:       nil,
   354  	}
   355  	assert.Equal(t, expected, resNode)
   356  }
   357  
   358  func Test_getAppRecursive(t *testing.T) {
   359  	for _, tt := range []struct {
   360  		name     string
   361  		r        *cache.Resource
   362  		ns       map[kube.ResourceKey]*cache.Resource
   363  		wantName string
   364  		wantOK   assert.BoolAssertionFunc
   365  	}{
   366  		{
   367  			name: "ok: cm1->app1",
   368  			r: &cache.Resource{
   369  				Ref: corev1.ObjectReference{
   370  					Name: "cm1",
   371  				},
   372  				OwnerRefs: []metav1.OwnerReference{
   373  					{Name: "app1"},
   374  				},
   375  			},
   376  			ns: map[kube.ResourceKey]*cache.Resource{
   377  				kube.NewResourceKey("", "", "", "app1"): {
   378  					Info: &ResourceInfo{
   379  						AppName: "app1",
   380  					},
   381  				},
   382  			},
   383  			wantName: "app1",
   384  			wantOK:   assert.True,
   385  		},
   386  		{
   387  			name: "ok: cm1->cm2->app1",
   388  			r: &cache.Resource{
   389  				Ref: corev1.ObjectReference{
   390  					Name: "cm1",
   391  				},
   392  				OwnerRefs: []metav1.OwnerReference{
   393  					{Name: "cm2"},
   394  				},
   395  			},
   396  			ns: map[kube.ResourceKey]*cache.Resource{
   397  				kube.NewResourceKey("", "", "", "cm2"): {
   398  					Ref: corev1.ObjectReference{
   399  						Name: "cm2",
   400  					},
   401  					OwnerRefs: []metav1.OwnerReference{
   402  						{Name: "app1"},
   403  					},
   404  				},
   405  				kube.NewResourceKey("", "", "", "app1"): {
   406  					Info: &ResourceInfo{
   407  						AppName: "app1",
   408  					},
   409  				},
   410  			},
   411  			wantName: "app1",
   412  			wantOK:   assert.True,
   413  		},
   414  		{
   415  			name: "cm1->cm2->app1 & cm1->cm3->app1",
   416  			r: &cache.Resource{
   417  				Ref: corev1.ObjectReference{
   418  					Name: "cm1",
   419  				},
   420  				OwnerRefs: []metav1.OwnerReference{
   421  					{Name: "cm2"},
   422  					{Name: "cm3"},
   423  				},
   424  			},
   425  			ns: map[kube.ResourceKey]*cache.Resource{
   426  				kube.NewResourceKey("", "", "", "cm2"): {
   427  					Ref: corev1.ObjectReference{
   428  						Name: "cm2",
   429  					},
   430  					OwnerRefs: []metav1.OwnerReference{
   431  						{Name: "app1"},
   432  					},
   433  				},
   434  				kube.NewResourceKey("", "", "", "cm3"): {
   435  					Ref: corev1.ObjectReference{
   436  						Name: "cm3",
   437  					},
   438  					OwnerRefs: []metav1.OwnerReference{
   439  						{Name: "app1"},
   440  					},
   441  				},
   442  				kube.NewResourceKey("", "", "", "app1"): {
   443  					Info: &ResourceInfo{
   444  						AppName: "app1",
   445  					},
   446  				},
   447  			},
   448  			wantName: "app1",
   449  			wantOK:   assert.True,
   450  		},
   451  		{
   452  			// Nothing cycle.
   453  			// Issue #11699, fixed #12667.
   454  			name: "ok: cm1->cm2 & cm1->cm3->cm2 & cm1->cm3->app1",
   455  			r: &cache.Resource{
   456  				Ref: corev1.ObjectReference{
   457  					Name: "cm1",
   458  				},
   459  				OwnerRefs: []metav1.OwnerReference{
   460  					{Name: "cm2"},
   461  					{Name: "cm3"},
   462  				},
   463  			},
   464  			ns: map[kube.ResourceKey]*cache.Resource{
   465  				kube.NewResourceKey("", "", "", "cm2"): {
   466  					Ref: corev1.ObjectReference{
   467  						Name: "cm2",
   468  					},
   469  				},
   470  				kube.NewResourceKey("", "", "", "cm3"): {
   471  					Ref: corev1.ObjectReference{
   472  						Name: "cm3",
   473  					},
   474  					OwnerRefs: []metav1.OwnerReference{
   475  						{Name: "cm2"},
   476  						{Name: "app1"},
   477  					},
   478  				},
   479  				kube.NewResourceKey("", "", "", "app1"): {
   480  					Info: &ResourceInfo{
   481  						AppName: "app1",
   482  					},
   483  				},
   484  			},
   485  			wantName: "app1",
   486  			wantOK:   assert.True,
   487  		},
   488  		{
   489  			name: "cycle: cm1<->cm2",
   490  			r: &cache.Resource{
   491  				Ref: corev1.ObjectReference{
   492  					Name: "cm1",
   493  				},
   494  				OwnerRefs: []metav1.OwnerReference{
   495  					{Name: "cm2"},
   496  				},
   497  			},
   498  			ns: map[kube.ResourceKey]*cache.Resource{
   499  				kube.NewResourceKey("", "", "", "cm1"): {
   500  					Ref: corev1.ObjectReference{
   501  						Name: "cm1",
   502  					},
   503  					OwnerRefs: []metav1.OwnerReference{
   504  						{Name: "cm2"},
   505  					},
   506  				},
   507  				kube.NewResourceKey("", "", "", "cm2"): {
   508  					Ref: corev1.ObjectReference{
   509  						Name: "cm2",
   510  					},
   511  					OwnerRefs: []metav1.OwnerReference{
   512  						{Name: "cm1"},
   513  					},
   514  				},
   515  			},
   516  			wantName: "",
   517  			wantOK:   assert.False,
   518  		},
   519  		{
   520  			name: "cycle: cm1->cm2->cm3->cm1",
   521  			r: &cache.Resource{
   522  				Ref: corev1.ObjectReference{
   523  					Name: "cm1",
   524  				},
   525  				OwnerRefs: []metav1.OwnerReference{
   526  					{Name: "cm2"},
   527  				},
   528  			},
   529  			ns: map[kube.ResourceKey]*cache.Resource{
   530  				kube.NewResourceKey("", "", "", "cm1"): {
   531  					Ref: corev1.ObjectReference{
   532  						Name: "cm1",
   533  					},
   534  					OwnerRefs: []metav1.OwnerReference{
   535  						{Name: "cm2"},
   536  					},
   537  				},
   538  				kube.NewResourceKey("", "", "", "cm2"): {
   539  					Ref: corev1.ObjectReference{
   540  						Name: "cm2",
   541  					},
   542  					OwnerRefs: []metav1.OwnerReference{
   543  						{Name: "cm3"},
   544  					},
   545  				},
   546  				kube.NewResourceKey("", "", "", "cm3"): {
   547  					Ref: corev1.ObjectReference{
   548  						Name: "cm3",
   549  					},
   550  					OwnerRefs: []metav1.OwnerReference{
   551  						{Name: "cm1"},
   552  					},
   553  				},
   554  			},
   555  			wantName: "",
   556  			wantOK:   assert.False,
   557  		},
   558  	} {
   559  		t.Run(tt.name, func(t *testing.T) {
   560  			visited := map[kube.ResourceKey]bool{}
   561  			got, ok := getAppRecursive(tt.r, tt.ns, visited)
   562  			assert.Equal(t, tt.wantName, got)
   563  			tt.wantOK(t, ok)
   564  		})
   565  	}
   566  }
   567  
   568  func TestSkipResourceUpdate(t *testing.T) {
   569  	var (
   570  		hash1X = "x"
   571  		hash2Y = "y"
   572  		hash3X = "x"
   573  	)
   574  	info := &ResourceInfo{
   575  		manifestHash: hash1X,
   576  		Health: &health.HealthStatus{
   577  			Status:  health.HealthStatusHealthy,
   578  			Message: "default",
   579  		},
   580  	}
   581  	t.Run("Nil", func(t *testing.T) {
   582  		assert.False(t, skipResourceUpdate(nil, nil))
   583  	})
   584  	t.Run("From Nil", func(t *testing.T) {
   585  		assert.False(t, skipResourceUpdate(nil, info))
   586  	})
   587  	t.Run("To Nil", func(t *testing.T) {
   588  		assert.False(t, skipResourceUpdate(info, nil))
   589  	})
   590  	t.Run("No hash", func(t *testing.T) {
   591  		assert.False(t, skipResourceUpdate(&ResourceInfo{}, &ResourceInfo{}))
   592  	})
   593  	t.Run("Same hash", func(t *testing.T) {
   594  		assert.True(t, skipResourceUpdate(&ResourceInfo{
   595  			manifestHash: hash1X,
   596  		}, &ResourceInfo{
   597  			manifestHash: hash1X,
   598  		}))
   599  	})
   600  	t.Run("Same hash value", func(t *testing.T) {
   601  		assert.True(t, skipResourceUpdate(&ResourceInfo{
   602  			manifestHash: hash1X,
   603  		}, &ResourceInfo{
   604  			manifestHash: hash3X,
   605  		}))
   606  	})
   607  	t.Run("Different hash value", func(t *testing.T) {
   608  		assert.False(t, skipResourceUpdate(&ResourceInfo{
   609  			manifestHash: hash1X,
   610  		}, &ResourceInfo{
   611  			manifestHash: hash2Y,
   612  		}))
   613  	})
   614  	t.Run("Same hash, empty health", func(t *testing.T) {
   615  		assert.True(t, skipResourceUpdate(&ResourceInfo{
   616  			manifestHash: hash1X,
   617  			Health:       &health.HealthStatus{},
   618  		}, &ResourceInfo{
   619  			manifestHash: hash3X,
   620  			Health:       &health.HealthStatus{},
   621  		}))
   622  	})
   623  	t.Run("Same hash, old health", func(t *testing.T) {
   624  		assert.False(t, skipResourceUpdate(&ResourceInfo{
   625  			manifestHash: hash1X,
   626  			Health: &health.HealthStatus{
   627  				Status: health.HealthStatusHealthy,
   628  			},
   629  		}, &ResourceInfo{
   630  			manifestHash: hash3X,
   631  			Health:       nil,
   632  		}))
   633  	})
   634  	t.Run("Same hash, new health", func(t *testing.T) {
   635  		assert.False(t, skipResourceUpdate(&ResourceInfo{
   636  			manifestHash: hash1X,
   637  			Health:       &health.HealthStatus{},
   638  		}, &ResourceInfo{
   639  			manifestHash: hash3X,
   640  			Health: &health.HealthStatus{
   641  				Status: health.HealthStatusHealthy,
   642  			},
   643  		}))
   644  	})
   645  	t.Run("Same hash, same health", func(t *testing.T) {
   646  		assert.True(t, skipResourceUpdate(&ResourceInfo{
   647  			manifestHash: hash1X,
   648  			Health: &health.HealthStatus{
   649  				Status:  health.HealthStatusHealthy,
   650  				Message: "same",
   651  			},
   652  		}, &ResourceInfo{
   653  			manifestHash: hash3X,
   654  			Health: &health.HealthStatus{
   655  				Status:  health.HealthStatusHealthy,
   656  				Message: "same",
   657  			},
   658  		}))
   659  	})
   660  	t.Run("Same hash, different health status", func(t *testing.T) {
   661  		assert.False(t, skipResourceUpdate(&ResourceInfo{
   662  			manifestHash: hash1X,
   663  			Health: &health.HealthStatus{
   664  				Status:  health.HealthStatusHealthy,
   665  				Message: "same",
   666  			},
   667  		}, &ResourceInfo{
   668  			manifestHash: hash3X,
   669  			Health: &health.HealthStatus{
   670  				Status:  health.HealthStatusDegraded,
   671  				Message: "same",
   672  			},
   673  		}))
   674  	})
   675  	t.Run("Same hash, different health message", func(t *testing.T) {
   676  		assert.True(t, skipResourceUpdate(&ResourceInfo{
   677  			manifestHash: hash1X,
   678  			Health: &health.HealthStatus{
   679  				Status:  health.HealthStatusHealthy,
   680  				Message: "same",
   681  			},
   682  		}, &ResourceInfo{
   683  			manifestHash: hash3X,
   684  			Health: &health.HealthStatus{
   685  				Status:  health.HealthStatusHealthy,
   686  				Message: "different",
   687  			},
   688  		}))
   689  	})
   690  }
   691  
   692  func TestShouldHashManifest(t *testing.T) {
   693  	tests := []struct {
   694  		name        string
   695  		appName     string
   696  		gvk         schema.GroupVersionKind
   697  		un          *unstructured.Unstructured
   698  		annotations map[string]string
   699  		want        bool
   700  	}{
   701  		{
   702  			name:    "appName not empty gvk matches",
   703  			appName: "MyApp",
   704  			gvk:     schema.GroupVersionKind{Group: application.Group, Kind: application.ApplicationKind},
   705  			un:      &unstructured.Unstructured{},
   706  			want:    true,
   707  		},
   708  		{
   709  			name:    "appName empty",
   710  			appName: "",
   711  			gvk:     schema.GroupVersionKind{Group: application.Group, Kind: application.ApplicationKind},
   712  			un:      &unstructured.Unstructured{},
   713  			want:    true,
   714  		},
   715  		{
   716  			name:    "appName empty group not match",
   717  			appName: "",
   718  			gvk:     schema.GroupVersionKind{Group: "group1", Kind: application.ApplicationKind},
   719  			un:      &unstructured.Unstructured{},
   720  			want:    false,
   721  		},
   722  		{
   723  			name:    "appName empty kind not match",
   724  			appName: "",
   725  			gvk:     schema.GroupVersionKind{Group: application.Group, Kind: "kind1"},
   726  			un:      &unstructured.Unstructured{},
   727  			want:    false,
   728  		},
   729  		{
   730  			name:        "argocd.argoproj.io/ignore-resource-updates=true",
   731  			appName:     "",
   732  			gvk:         schema.GroupVersionKind{Group: application.Group, Kind: "kind1"},
   733  			un:          &unstructured.Unstructured{},
   734  			annotations: map[string]string{"argocd.argoproj.io/ignore-resource-updates": "true"},
   735  			want:        true,
   736  		},
   737  		{
   738  			name:        "argocd.argoproj.io/ignore-resource-updates=invalid",
   739  			appName:     "",
   740  			gvk:         schema.GroupVersionKind{Group: application.Group, Kind: "kind1"},
   741  			un:          &unstructured.Unstructured{},
   742  			annotations: map[string]string{"argocd.argoproj.io/ignore-resource-updates": "invalid"},
   743  			want:        false,
   744  		},
   745  		{
   746  			name:        "argocd.argoproj.io/ignore-resource-updates=false",
   747  			appName:     "",
   748  			gvk:         schema.GroupVersionKind{Group: application.Group, Kind: "kind1"},
   749  			un:          &unstructured.Unstructured{},
   750  			annotations: map[string]string{"argocd.argoproj.io/ignore-resource-updates": "false"},
   751  			want:        false,
   752  		},
   753  	}
   754  
   755  	for _, test := range tests {
   756  		t.Run(test.name, func(t *testing.T) {
   757  			if test.annotations != nil {
   758  				test.un.SetAnnotations(test.annotations)
   759  			}
   760  			got := shouldHashManifest(test.appName, test.gvk, test.un)
   761  			require.Equalf(t, test.want, got, "test=%v", test.name)
   762  		})
   763  	}
   764  }
   765  
   766  func Test_GetVersionsInfo_error_redacted(t *testing.T) {
   767  	c := liveStateCache{}
   768  	cluster := &appv1.Cluster{
   769  		Server: "https://localhost:1234",
   770  		Config: appv1.ClusterConfig{
   771  			Username: "admin",
   772  			Password: "password",
   773  		},
   774  	}
   775  	_, _, err := c.GetVersionsInfo(cluster)
   776  	require.Error(t, err)
   777  	assert.NotContains(t, err.Error(), "password")
   778  }
   779  
   780  func TestLoadCacheSettings(t *testing.T) {
   781  	_, settingsManager := fixtures(map[string]string{
   782  		"application.instanceLabelKey":       "testLabel",
   783  		"application.resourceTrackingMethod": string(appv1.TrackingMethodLabel),
   784  		"installationID":                     "123456789",
   785  	})
   786  	ch := liveStateCache{
   787  		settingsMgr: settingsManager,
   788  	}
   789  	label, err := settingsManager.GetAppInstanceLabelKey()
   790  	require.NoError(t, err)
   791  	trackingMethod, err := settingsManager.GetTrackingMethod()
   792  	require.NoError(t, err)
   793  	res, err := ch.loadCacheSettings()
   794  	require.NoError(t, err)
   795  
   796  	assert.Equal(t, label, res.appInstanceLabelKey)
   797  	assert.Equal(t, string(appv1.TrackingMethodLabel), trackingMethod)
   798  	assert.Equal(t, "123456789", res.installationID)
   799  
   800  	// By default the values won't be nil
   801  	assert.NotNil(t, res.resourceOverrides)
   802  	assert.NotNil(t, res.clusterSettings)
   803  	assert.True(t, res.ignoreResourceUpdatesEnabled)
   804  }
   805  
   806  func Test_ownerRefGV(t *testing.T) {
   807  	tests := []struct {
   808  		name     string
   809  		input    metav1.OwnerReference
   810  		expected schema.GroupVersion
   811  	}{
   812  		{
   813  			name: "valid API Version",
   814  			input: metav1.OwnerReference{
   815  				APIVersion: "apps/v1",
   816  			},
   817  			expected: schema.GroupVersion{
   818  				Group:   "apps",
   819  				Version: "v1",
   820  			},
   821  		},
   822  		{
   823  			name: "custom defined version",
   824  			input: metav1.OwnerReference{
   825  				APIVersion: "custom-version",
   826  			},
   827  			expected: schema.GroupVersion{
   828  				Version: "custom-version",
   829  				Group:   "",
   830  			},
   831  		},
   832  		{
   833  			name: "empty APIVersion",
   834  			input: metav1.OwnerReference{
   835  				APIVersion: "",
   836  			},
   837  			expected: schema.GroupVersion{},
   838  		},
   839  	}
   840  
   841  	for _, tt := range tests {
   842  		t.Run(tt.name, func(t *testing.T) {
   843  			res := ownerRefGV(tt.input)
   844  			assert.Equal(t, tt.expected, res)
   845  		})
   846  	}
   847  }