github.phpd.cn/cilium/cilium@v1.6.12/pkg/k8s/cnp_test.go

// Copyright 2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !privileged_tests

package k8s

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	go_runtime "runtime"
	"strconv"
	"sync"
	"time"

	"github.com/cilium/cilium/pkg/checker"
	"github.com/cilium/cilium/pkg/defaults"
	"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
	"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
	informer "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions"
	"github.com/cilium/cilium/pkg/k8s/types"
	k8sversion "github.com/cilium/cilium/pkg/k8s/version"
	"github.com/cilium/cilium/pkg/logging"
	"github.com/cilium/cilium/pkg/logging/logfields"
	"github.com/cilium/cilium/pkg/policy/api"

	"github.com/sirupsen/logrus"
	. "gopkg.in/check.v1"
	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	k8sTesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"
)

type K8sIntegrationSuite struct{}

var _ = Suite(&K8sIntegrationSuite{})

func (k *K8sIntegrationSuite) SetUpSuite(c *C) {
	logging.DefaultLogger.SetLevel(logrus.PanicLevel)
	log = logging.DefaultLogger.WithField(logfields.LogSubsys, subsysK8s)

	if os.Getenv("INTEGRATION") != "" {
		if k8sConfigPath := os.Getenv("KUBECONFIG"); k8sConfigPath == "" {
			Configure("", "/var/lib/cilium/cilium.kubeconfig", defaults.K8sClientQPSLimit, defaults.K8sClientBurst)
		} else {
			Configure("", k8sConfigPath, defaults.K8sClientQPSLimit, defaults.K8sClientBurst)
		}
		restConfig, err := CreateConfig()
		c.Assert(err, IsNil)
		apiextensionsclientset, err := apiextensionsclient.NewForConfig(restConfig)
		c.Assert(err, IsNil)
		err = v2.CreateCustomResourceDefinitions(apiextensionsclientset)
		c.Assert(err, IsNil)

		client, err := clientset.NewForConfig(restConfig)
		c.Assert(err, IsNil)
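		// Best-effort cleanup of a policy left over from a previous run;
		// the error is deliberately ignored since the policy may not exist.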
		client.CiliumV2().CiliumNetworkPolicies("default").Delete("testing-policy", &metav1.DeleteOptions{})
	}
}

func testUpdateCNPNodeStatusK8s(integrationTest bool, k8sVersion string, c *C) {
	// For k8s <v1.13 the unit test performs 3 actions, A, B and C, where:
	// A-1.10) update k8s1 node status
	//         this makes 1 attempt as it is the first node populating status
	// B-1.10) update k8s2 node status
	//         this makes 3 attempts
	// C-1.10) update k8s1 node status with revision=2 and enforcing=false
	//         this makes 3 attempts
	// The code paths for A-1.10, B-1.10 and C-1.10 are marked in the
	// comments below.

	// For k8s >=v1.13 the unit test performs the same 3 actions, where:
	// A-1.13) update k8s1 node status
	//         this makes 1 attempt as it is the first node populating status
	// B-1.13) update k8s2 node status
	//         this makes 2 attempts
	// C-1.13) update k8s1 node status with revision=2 and enforcing=false
	//         this makes 2 attempts
	// The code paths for A-1.13, B-1.13 and C-1.13 are marked in the
	// comments below.

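	// For reference, the first status update is sent as a two-operation
	// JSON patch of roughly this shape (a sketch; the exact bytes are
	// built and asserted in the fake reactor below):
	//
	//   [
	//     {"op": "test", "path": "/status", "value": null},
	//     {"op": "add",  "path": "/status", "value": {"nodes": {"k8s1": {...}}}}
	//   ]
	//
	// The "test" operation fails once another node has already populated
	// /status, which is what forces the extra attempts described above.
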
	err := k8sversion.Force(k8sVersion)
	c.Assert(err, IsNil)

	cnp := &types.SlimCNP{
		CiliumNetworkPolicy: &v2.CiliumNetworkPolicy{
			TypeMeta: metav1.TypeMeta{
				Kind:       "CiliumNetworkPolicy",
				APIVersion: "cilium.io/v2",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      "testing-policy",
				Namespace: "default",
			},
			Spec: &api.Rule{
				EndpointSelector: api.EndpointSelector{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{
							"foo": "bar",
						},
					},
				},
			},
		},
	}

	wantedCNP := cnp.DeepCopy()

	wantedCNPS := v2.CiliumNetworkPolicyStatus{
		Nodes: map[string]v2.CiliumNetworkPolicyNodeStatus{
			"k8s1": {
				Enforcing:   true,
				Revision:    1,
				OK:          true,
				LastUpdated: v2.Timestamp{},
				Annotations: map[string]string{
					"foo":                            "bar",
					"i-will-disappear-in-2nd-update": "bar",
				},
			},
			"k8s2": {
				Enforcing:   true,
				Revision:    2,
				OK:          true,
				LastUpdated: v2.Timestamp{},
			},
		},
	}

	wantedCNP.Status = wantedCNPS

	var ciliumNPClient clientset.Interface
	if integrationTest {
		restConfig, err := CreateConfig()
		c.Assert(err, IsNil)
		ciliumNPClient, err = clientset.NewForConfig(restConfig)
		c.Assert(err, IsNil)
		cnp.CiliumNetworkPolicy, err = ciliumNPClient.CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Create(cnp.CiliumNetworkPolicy)
		c.Assert(err, IsNil)
		defer func() {
			err = ciliumNPClient.CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Delete(cnp.GetName(), &metav1.DeleteOptions{})
			c.Assert(err, IsNil)
		}()
	} else {
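		// In unit mode there is no apiserver, so a fake clientset reactor
		// stands in for it: every "patch" on ciliumnetworkpolicies is
		// intercepted, the received JSON patch is compared against the
		// expected one, and the reactor fails the "test" precondition
		// exactly where a real apiserver would.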
		ciliumNPClientFake := &fake.Clientset{}
		ciliumNPClientFake.AddReactor("patch", "ciliumnetworkpolicies",
			func(action k8sTesting.Action) (bool, runtime.Object, error) {
				pa := action.(k8sTesting.PatchAction)
				time.Sleep(1 * time.Millisecond)
				var receivedJsonPatch []JSONPatch
				err := json.Unmarshal(pa.GetPatch(), &receivedJsonPatch)
				c.Assert(err, IsNil)

				switch {
				case receivedJsonPatch[0].OP == "test" && receivedJsonPatch[0].Path == "/status":
					switch {
					case receivedJsonPatch[0].Value == nil:
						cnpns := receivedJsonPatch[1].Value.(map[string]interface{})
						nodes := cnpns["nodes"].(map[string]interface{})
						if nodes["k8s1"] == nil {
							// codepath B-1.10) and B-1.13) 1st attempt
							// This is an attempt from k8s2, so we need to
							// return an error because `/status` is not nil:
							// it was previously set by k8s1.
							return true, nil, &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonInvalid}}
						}
						// codepath A-1.10), C-1.10), A-1.13) and C-1.13)
						n := nodes["k8s1"].(map[string]interface{})

						if n["localPolicyRevision"].(float64) == 2 {
							// codepath C-1.10) and C-1.13) 1st attempt
							// This is an attempt from k8s1 to update its
							// status again; return an error because
							// `/status` is not nil, as it was previously
							// set by k8s1.
							return true, nil, &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonInvalid}}
						}
						// codepath A-1.10) and A-1.13)

						// Ignore the lastUpdated timestamp as it would
						// break the DeepEquals comparison.
						n["lastUpdated"] = "0001-01-01T00:00:00Z"

						// Remove k8s2 from the nodes status.
						cnpsK8s1 := wantedCNPS.DeepCopy()
						delete(cnpsK8s1.Nodes, "k8s2")
						createStatusAndNodePatch := []JSONPatch{
							{
								OP:    "test",
								Path:  "/status",
								Value: nil,
							},
							{
								OP:    "add",
								Path:  "/status",
								Value: cnpsK8s1,
							},
						}
						expectedJSONPatchBytes, err := json.Marshal(createStatusAndNodePatch)
						c.Assert(err, IsNil)
						var expectedJSONPatch []JSONPatch
						err = json.Unmarshal(expectedJSONPatchBytes, &expectedJSONPatch)
						c.Assert(err, IsNil)

						c.Assert(receivedJsonPatch, checker.DeepEquals, expectedJSONPatch)

						// Copy the status to the cnp so we can compare it
						// at the end of this test to make sure everything
						// is correct.
						cnp.Status = *cnpsK8s1
						return true, cnp.CiliumNetworkPolicy, nil

					case receivedJsonPatch[0].Value != nil:
						// codepath B-1.10) and C-1.10) 2nd attempt
						// k8s1 and k8s2 know that `/status` exists and was
						// created by a different node, so they just need to
						// add themselves to the list of nodes.
						// "Unfortunately" the list of nodes is non-empty,
						// so the `test` of the `/status` value must fail.
						c.Assert(len(cnp.Status.Nodes), Not(Equals), 0)
						return true, nil, &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonInvalid}}
					}
				case receivedJsonPatch[0].OP == "replace":
					// codepath B-1.13) and C-1.13) 2nd attempt
					fallthrough
				case receivedJsonPatch[0].OP == "add":
					cnpns := receivedJsonPatch[0].Value.(map[string]interface{})
					// codepath B-1.10) and C-1.10) 3rd attempt
					// The updating node knows that `/status` exists and was
					// created by a different node, so it just needs to patch
					// its own entry in the list of nodes.
					if len(cnp.Status.Nodes) == 1 {
						// codepath B-1.10) 3rd attempt and B-1.13) 2nd attempt
						// k8s2 knows that `/status` was populated by a
						// different node, so it just needs to add itself to
						// the list of nodes.
						// Ignore the lastUpdated timestamp as it would
						// break the DeepEquals comparison.
						cnpns["lastUpdated"] = "0001-01-01T00:00:00Z"

						// Remove k8s1 from the nodes status.
						cnpsK8s2 := wantedCNPS.DeepCopy()
						delete(cnpsK8s2.Nodes, "k8s1")

						createStatusAndNodePatch := []JSONPatch{
							{
								OP:    receivedJsonPatch[0].OP,
								Path:  "/status/nodes/k8s2",
								Value: cnpsK8s2.Nodes["k8s2"],
							},
						}
						expectedJSONPatchBytes, err := json.Marshal(createStatusAndNodePatch)
						c.Assert(err, IsNil)
						var expectedJSONPatch []JSONPatch
						err = json.Unmarshal(expectedJSONPatchBytes, &expectedJSONPatch)
						c.Assert(err, IsNil)

						c.Assert(receivedJsonPatch, checker.DeepEquals, expectedJSONPatch)

						cnp.Status.Nodes["k8s2"] = cnpsK8s2.Nodes["k8s2"]
						return true, cnp.CiliumNetworkPolicy, nil
					}
					// codepath C-1.10) 3rd attempt and C-1.13) 2nd attempt
					cnpns["lastUpdated"] = "0001-01-01T00:00:00Z"

					// Remove k8s2 from the nodes status.
					cnpsK8s1 := wantedCNPS.DeepCopy()
					delete(cnpsK8s1.Nodes, "k8s2")
					// This update from k8s1 should have enforcing=false and
					// revision=2.
					nWanted := cnpsK8s1.Nodes["k8s1"]
					nWanted.Revision = 2
					nWanted.Enforcing = false
					cnpsK8s1.Nodes["k8s1"] = nWanted

					createStatusAndNodePatch := []JSONPatch{
						{
							OP:    receivedJsonPatch[0].OP,
							Path:  "/status/nodes/k8s1",
							Value: nWanted,
						},
					}
					expectedJSONPatchBytes, err := json.Marshal(createStatusAndNodePatch)
					c.Assert(err, IsNil)
					var expectedJSONPatch []JSONPatch
					err = json.Unmarshal(expectedJSONPatchBytes, &expectedJSONPatch)
					c.Assert(err, IsNil)

					c.Assert(receivedJsonPatch, checker.DeepEquals, expectedJSONPatch)

					cnp.Status.Nodes["k8s1"] = cnpsK8s1.Nodes["k8s1"]
					return true, cnp.CiliumNetworkPolicy, nil
				}
				// should never reach this point
				c.FailNow()
				return true, nil, fmt.Errorf("should not have been called")
			})
		ciliumNPClient = ciliumNPClientFake
	}

	updateContext := &CNPStatusUpdateContext{
		CiliumNPClient: ciliumNPClient,
		NodeName:       "k8s1",
	}

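	// update(cnp, enforcing, ok, err, revision, annotations) writes a
	// single node's entry under status.nodes; this first call simulates
	// k8s1 populating the empty /status (action A above).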
	cnpns := wantedCNPS.Nodes["k8s1"]
	err = updateContext.update(cnp, cnpns.Enforcing, cnpns.OK, err, cnpns.Revision, cnpns.Annotations)
	c.Assert(err, IsNil)

	if integrationTest {
		cnp.CiliumNetworkPolicy, err = ciliumNPClient.CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Get(cnp.GetName(), metav1.GetOptions{})
		c.Assert(err, IsNil)
	}

	updateContext = &CNPStatusUpdateContext{
		CiliumNPClient: ciliumNPClient,
		NodeName:       "k8s2",
	}

	cnpns = wantedCNPS.Nodes["k8s2"]
	err = updateContext.update(cnp, cnpns.Enforcing, cnpns.OK, err, cnpns.Revision, cnpns.Annotations)
	c.Assert(err, IsNil)

	if integrationTest {
		cnp.CiliumNetworkPolicy, err = ciliumNPClient.CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Get(cnp.GetName(), metav1.GetOptions{})
		c.Assert(err, IsNil)

		// Ignore timestamps
		n := cnp.Status.Nodes["k8s1"]
		n.LastUpdated = v2.Timestamp{}
		cnp.Status.Nodes["k8s1"] = n
		n = cnp.Status.Nodes["k8s2"]
		n.LastUpdated = v2.Timestamp{}
		cnp.Status.Nodes["k8s2"] = n
	}
	c.Assert(cnp.Status, checker.DeepEquals, wantedCNP.Status)

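	// Note: wantedCNP.Status was set by struct assignment from wantedCNPS
	// above, which copies the map header rather than the entries, so the
	// Nodes map is shared. The edit below is therefore also visible through
	// wantedCNPS when the k8s1 status is read again for the third update.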
	n := wantedCNP.Status.Nodes["k8s1"]
	n.Revision = 2
	n.Enforcing = false
	n.Annotations = map[string]string{
		"foo": "bar",
	}
	wantedCNP.Status.Nodes["k8s1"] = n

	updateContext = &CNPStatusUpdateContext{
		CiliumNPClient: ciliumNPClient,
		NodeName:       "k8s1",
	}

	cnpns = wantedCNPS.Nodes["k8s1"]
	err = updateContext.update(cnp, cnpns.Enforcing, cnpns.OK, err, cnpns.Revision, cnpns.Annotations)
	c.Assert(err, IsNil)

	if integrationTest {
		cnp.CiliumNetworkPolicy, err = ciliumNPClient.CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Get(cnp.GetName(), metav1.GetOptions{})
		c.Assert(err, IsNil)

		// Ignore timestamps
		n := cnp.Status.Nodes["k8s1"]
		n.LastUpdated = v2.Timestamp{}
		cnp.Status.Nodes["k8s1"] = n
		n = cnp.Status.Nodes["k8s2"]
		n.LastUpdated = v2.Timestamp{}
		cnp.Status.Nodes["k8s2"] = n
	}
	c.Assert(cnp.Status, checker.DeepEquals, wantedCNP.Status)
}

func (k *K8sIntegrationSuite) Test_updateCNPNodeStatus_1_10(c *C) {
	c.Skip("Test not available as the implementation is not in place")
	testUpdateCNPNodeStatusK8s(os.Getenv("INTEGRATION") != "", "1.10", c)
}

func (k *K8sIntegrationSuite) Test_updateCNPNodeStatus_1_13(c *C) {
	testUpdateCNPNodeStatusK8s(os.Getenv("INTEGRATION") != "", "1.13", c)
}

func benchmarkCNPNodeStatusController(integrationTest bool, nNodes int, nParallelClients int, k8sVersion string, c *C) {
	if !integrationTest {
		c.Skip("Benchmark only available with INTEGRATION=1")
	}

	err := k8sversion.Force(k8sVersion)
	c.Assert(err, IsNil)

	cnp := &types.SlimCNP{
		CiliumNetworkPolicy: &v2.CiliumNetworkPolicy{
			TypeMeta: metav1.TypeMeta{
				Kind:       "CiliumNetworkPolicy",
				APIVersion: "cilium.io/v2",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      "testing-policy",
				Namespace: "default",
			},
			Spec: &api.Rule{
				EndpointSelector: api.EndpointSelector{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"foo": "bar"},
					},
				},
			},
		},
	}

	restConfig, err := CreateConfig()
	c.Assert(err, IsNil)
	err = Init()
	c.Assert(err, IsNil)

	// One client per node
	ciliumNPClients := make([]clientset.Interface, nNodes)
	for i := range ciliumNPClients {
		ciliumNPClients[i], err = clientset.NewForConfig(restConfig)
		c.Assert(err, IsNil)
	}

	cnp.CiliumNetworkPolicy, err = ciliumNPClients[0].CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Create(cnp.CiliumNetworkPolicy)
	c.Assert(err, IsNil)
	defer func() {
		err = ciliumNPClients[0].CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Delete(cnp.GetName(), &metav1.DeleteOptions{})
		c.Assert(err, IsNil)
	}()

	var cnpStore cache.Store
	switch {
	case k8sversion.Capabilities().UpdateStatus:
		// k8s >= 1.13 does not require a store
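		// (With the /status subresource available the updater can patch
		// the status directly, so it does not need a locally cached copy
		// of the CNP and no informer is started on this path.)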
	default:
		// TODO create a cache.Store per node
		si := informer.NewSharedInformerFactory(ciliumNPClients[0], 5*time.Minute)
		ciliumV2Controller := si.Cilium().V2().CiliumNetworkPolicies().Informer()
		cnpStore = ciliumV2Controller.GetStore()
		si.Start(wait.NeverStop)
		var exists bool
		// Wait for the created CNP to appear in the store.
		for !exists {
			_, exists, err = cnpStore.Get(cnp)
			c.Assert(err, IsNil)
			time.Sleep(100 * time.Millisecond)
		}
	}

	wg := sync.WaitGroup{}
	wg.Add(nNodes)
	r := make(chan int, nNodes)
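	// Fan out: nParallelClients workers drain the channel; node index i
	// is handled with the client created for that node, simulating nNodes
	// agents updating the same CNP status concurrently.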
	for i := 0; i < nParallelClients; i++ {
		go func() {
			for i := range r {
				updateContext := &CNPStatusUpdateContext{
					CiliumNPClient: ciliumNPClients[i],
					NodeName:       "k8s" + strconv.Itoa(i),
					CiliumV2Store:  cnpStore,
					WaitForEndpointsAtPolicyRev: func(ctx context.Context, rev uint64) error {
						return nil
					},
				}
				err := updateContext.UpdateStatus(context.Background(), cnp, uint64(i), nil)
				c.Assert(err, IsNil)
				wg.Done()
			}
		}()
	}

	start := time.Now()
	c.ResetTimer()
	for i := 0; i < nNodes; i++ {
		r <- i
	}
	wg.Wait()
	c.StopTimer()
	c.Logf("Test took: %s", time.Since(start))
}

func (k *K8sIntegrationSuite) Benchmark_CNPNodeStatusController_1_10(c *C) {
	nNodes, err := strconv.Atoi(os.Getenv("NODES"))
	c.Assert(err, IsNil)

	// Create the parallel clients. We achieve better results if the number
	// of clients is not the same as the number of NODES: we can simulate
	// 1000 nodes, but we cannot simulate 1000 clients on an 8-CPU machine.
	nClients := go_runtime.NumCPU()
	if nClientsStr := os.Getenv("PARALLEL_CLIENTS"); nClientsStr != "" {
		nClients, err = strconv.Atoi(nClientsStr)
		c.Assert(err, IsNil)
	}
	c.Logf("Running with %d parallel clients and %d nodes", nClients, nNodes)
	benchmarkCNPNodeStatusController(os.Getenv("INTEGRATION") != "", nNodes, nClients, "1.10", c)
}

func (k *K8sIntegrationSuite) Benchmark_CNPNodeStatusController_1_13(c *C) {
	nNodes, err := strconv.Atoi(os.Getenv("NODES"))
	c.Assert(err, IsNil)

	// Create the parallel clients. We achieve better results if the number
	// of clients is not the same as the number of NODES: we can simulate
	// 1000 nodes, but we cannot simulate 1000 clients on an 8-CPU machine.
	nClients := go_runtime.NumCPU()
	if nClientsStr := os.Getenv("PARALLEL_CLIENTS"); nClientsStr != "" {
		nClients, err = strconv.Atoi(nClientsStr)
		c.Assert(err, IsNil)
	}
	c.Logf("Running with %d parallel clients and %d nodes", nClients, nNodes)
	benchmarkCNPNodeStatusController(os.Getenv("INTEGRATION") != "", nNodes, nClients, "1.13", c)
}

func (k *K8sIntegrationSuite) benchmarkUpdateCNPNodeStatus(integrationTest bool, nNodes int, nParallelClients int, k8sVersion string, c *C) {
	err := k8sversion.Force(k8sVersion)
	c.Assert(err, IsNil)
	cnp := &types.SlimCNP{
		CiliumNetworkPolicy: &v2.CiliumNetworkPolicy{
			TypeMeta: metav1.TypeMeta{
				Kind:       "CiliumNetworkPolicy",
				APIVersion: "cilium.io/v2",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      "testing-policy",
				Namespace: "default",
			},
			Spec: &api.Rule{
				EndpointSelector: api.EndpointSelector{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"foo": "bar"},
					},
				},
			},
		},
	}

	// One client per node
	ciliumNPClients := make([]clientset.Interface, nNodes)
	if integrationTest {
		restConfig, err := CreateConfig()
		c.Assert(err, IsNil)
		for i := range ciliumNPClients {
			ciliumNPClients[i], err = clientset.NewForConfig(restConfig)
			c.Assert(err, IsNil)
		}
		cnp.CiliumNetworkPolicy, err = ciliumNPClients[0].CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Create(cnp.CiliumNetworkPolicy)
		c.Assert(err, IsNil)
		defer func() {
			err = ciliumNPClients[0].CiliumV2().CiliumNetworkPolicies(cnp.GetNamespace()).Delete(cnp.GetName(), &metav1.DeleteOptions{})
			c.Assert(err, IsNil)
		}()
	} else {
		ciliumNPClientFake := &fake.Clientset{}
		ciliumNPClientFake.AddReactor("patch", "ciliumnetworkpolicies",
			func(action k8sTesting.Action) (bool, runtime.Object, error) {
				time.Sleep(1 * time.Millisecond)
				return true, cnp.CiliumNetworkPolicy, nil
			})
		ciliumNPClientFake.AddReactor("get", "ciliumnetworkpolicies",
			func(action k8sTesting.Action) (bool, runtime.Object, error) {
				time.Sleep(1 * time.Millisecond)
				return true, cnp.CiliumNetworkPolicy, nil
			})
		ciliumNPClientFake.AddReactor("update", "ciliumnetworkpolicies",
			func(action k8sTesting.Action) (bool, runtime.Object, error) {
				ua := action.(k8sTesting.UpdateAction)
				cnp := ua.GetObject().(*v2.CiliumNetworkPolicy)
				time.Sleep(1 * time.Millisecond)
				return true, cnp, nil
			})

		for i := range ciliumNPClients {
			ciliumNPClients[i] = ciliumNPClientFake
		}
	}
	wg := sync.WaitGroup{}
	wg.Add(nNodes)
	r := make(chan int, nNodes)
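	// Same fan-out as in benchmarkCNPNodeStatusController, but this
	// benchmark exercises the lower-level update call directly, with no
	// store and no wait for endpoints.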
	for i := 0; i < nParallelClients; i++ {
		go func() {
			for i := range r {
				updateContext := &CNPStatusUpdateContext{
					CiliumNPClient: ciliumNPClients[i],
					NodeName:       "k8s" + strconv.Itoa(i),
				}
				err := updateContext.update(cnp, true, true, nil, uint64(i), nil)
				c.Assert(err, IsNil)
				wg.Done()
			}
		}()
	}

	start := time.Now()
	c.ResetTimer()
	for i := 0; i < nNodes; i++ {
		r <- i
	}
	wg.Wait()
	c.StopTimer()
	c.Logf("Test took: %s", time.Since(start))
}

func (k *K8sIntegrationSuite) Benchmark_UpdateCNPNodeStatus_1_10(c *C) {
	nNodes, err := strconv.Atoi(os.Getenv("NODES"))
	c.Assert(err, IsNil)

	// Create the parallel clients. We achieve better results if the number
	// of clients is not the same as the number of NODES: we can simulate
	// 1000 nodes, but we cannot simulate 1000 clients on an 8-CPU machine.
	nClients := go_runtime.NumCPU()
	if nClientsStr := os.Getenv("PARALLEL_CLIENTS"); nClientsStr != "" {
		nClients, err = strconv.Atoi(nClientsStr)
		c.Assert(err, IsNil)
	}
	c.Logf("Running with %d parallel clients and %d nodes", nClients, nNodes)
	k.benchmarkUpdateCNPNodeStatus(os.Getenv("INTEGRATION") != "", nNodes, nClients, "1.10", c)
}

func (k *K8sIntegrationSuite) Benchmark_UpdateCNPNodeStatus_1_13(c *C) {
	nNodes, err := strconv.Atoi(os.Getenv("NODES"))
	c.Assert(err, IsNil)

	// Create the parallel clients. We achieve better results if the number
	// of clients is not the same as the number of NODES: we can simulate
	// 1000 nodes, but we cannot simulate 1000 clients on an 8-CPU machine.
	nClients := go_runtime.NumCPU()
	if nClientsStr := os.Getenv("PARALLEL_CLIENTS"); nClientsStr != "" {
		nClients, err = strconv.Atoi(nClientsStr)
		c.Assert(err, IsNil)
	}
	c.Logf("Running with %d parallel clients and %d nodes", nClients, nNodes)
	k.benchmarkUpdateCNPNodeStatus(os.Getenv("INTEGRATION") != "", nNodes, nClients, "1.13", c)
}

func (k *K8sIntegrationSuite) benchmarkGetNodes(integrationTest bool, nCycles int, nParallelClients int, protobuf bool, c *C) {
	if !integrationTest {
		// Without a live apiserver the clients below cannot be created.
		c.Skip("Benchmark only available with INTEGRATION=1")
	}

	// One client per parallel worker
	k8sClients := make([]kubernetes.Interface, nParallelClients)
	restConfig, err := CreateConfig()
	c.Assert(err, IsNil)
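	// Setting this content type makes client-go request the Protobuf wire
	// encoding from the apiserver instead of the default JSON; it is the
	// only knob that differs between the two Benchmark_GetNodes* variants.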
	if protobuf {
		restConfig.ContentConfig.ContentType = `application/vnd.kubernetes.protobuf`
	}
	for i := range k8sClients {
		k8sClients[i], err = kubernetes.NewForConfig(restConfig)
		c.Assert(err, IsNil)
	}
	wg := sync.WaitGroup{}
	wg.Add(nCycles)
	r := make(chan int, nCycles)
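	// Unlike the CNP benchmarks, each worker here owns one client for its
	// whole lifetime; the channel only meters the total number of Get
	// calls issued.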
	for i := 0; i < nParallelClients; i++ {
		go func(clientID int) {
			for range r {
				_, err := k8sClients[clientID].CoreV1().Nodes().Get("k8s1", metav1.GetOptions{})
				c.Assert(err, IsNil)
				wg.Done()
			}
		}(i)
	}

	start := time.Now()
	c.ResetTimer()
	for i := 0; i < nCycles; i++ {
		r <- i
	}
	wg.Wait()
	c.StopTimer()
	c.Logf("Test took: %s", time.Since(start))
}

func (k *K8sIntegrationSuite) Benchmark_GetNodesProto(c *C) {
	nCycles, err := strconv.Atoi(os.Getenv("CYCLES"))
	if err != nil {
		nCycles = c.N
	}

	// Create the parallel clients. We achieve better results if the number
	// of clients is not the same as the number of CYCLES: we can run 1000
	// cycles, but we cannot simulate 1000 clients on an 8-CPU machine.
	nClients := go_runtime.NumCPU()
	if nClientsStr := os.Getenv("PARALLEL_CLIENTS"); nClientsStr != "" {
		nClients, err = strconv.Atoi(nClientsStr)
		c.Assert(err, IsNil)
	}
	c.Logf("Running with %d parallel clients and %d cycles", nClients, nCycles)
	k.benchmarkGetNodes(os.Getenv("INTEGRATION") != "", nCycles, nClients, true, c)
}

func (k *K8sIntegrationSuite) Benchmark_GetNodesJSON(c *C) {
	nCycles, err := strconv.Atoi(os.Getenv("CYCLES"))
	if err != nil {
		nCycles = c.N
	}

	// Create the parallel clients. We achieve better results if the number
	// of clients is not the same as the number of CYCLES: we can run 1000
	// cycles, but we cannot simulate 1000 clients on an 8-CPU machine.
	nClients := go_runtime.NumCPU()
	if nClientsStr := os.Getenv("PARALLEL_CLIENTS"); nClientsStr != "" {
		nClients, err = strconv.Atoi(nClientsStr)
		c.Assert(err, IsNil)
	}
	c.Logf("Running with %d parallel clients and %d cycles", nClients, nCycles)
	k.benchmarkGetNodes(os.Getenv("INTEGRATION") != "", nCycles, nClients, false, c)
}