github.com/nginxinc/kubernetes-ingress@v1.12.5/tests/suite/test_transport_server_tcp_load_balance.py

import pytest
import re
import socket
import time

from urllib3.exceptions import NewConnectionError

from suite.resources_utils import (
    wait_before_test,
    get_ts_nginx_template_conf,
    scale_deployment,
    get_events,
    wait_for_event_increment,
)
from suite.custom_resources_utils import (
    patch_ts,
    read_ts,
    delete_ts,
    create_ts_from_yaml,
)
from settings import TEST_DATA

@pytest.mark.ts
@pytest.mark.parametrize(
    "crd_ingress_controller, transport_server_setup",
    [
        (
            {
                "type": "complete",
                "extra_args": [
                    "-global-configuration=nginx-ingress/nginx-configuration",
                    "-enable-leader-election=false",
                ],
            },
            {"example": "transport-server-tcp-load-balance"},
        )
    ],
    indirect=True,
)
class TestTransportServerTcpLoadBalance:

    def restore_ts(self, kube_apis, transport_server_setup) -> None:
        """
        Revert the TransportServer resource to a valid state.
        """
        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/standard/transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        wait_before_test()

    def test_number_of_replicas(
        self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Scaling the service to 4 replicas should result in 4 upstream servers in the nginx config.
        """
        original = scale_deployment(kube_apis.apps_v1_api, "tcp-service", transport_server_setup.namespace, 4)

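        # Poll the generated nginx config until the number of upstream 'server' entries
        # matches the new replica count (or we run out of retries).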
        num_servers = 0
        retry = 0

        while num_servers != 4 and retry <= 30:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1,
                transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace
            )

            pattern = 'server .*;'
            num_servers = len(re.findall(pattern, result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert num_servers == 4

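        # Scale back to the original replica count and wait for the config to converge again.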
        scale_deployment(kube_apis.apps_v1_api, "tcp-service", transport_server_setup.namespace, original)
        retry = 0
        while num_servers != original and retry <= 50:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1,
                transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace
            )

            pattern = 'server .*;'
            num_servers = len(re.findall(pattern, result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert num_servers == original

    def test_tcp_request_load_balanced(
            self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Requests to the load balanced TCP service should result in responses from 3 different endpoints.
        """
        wait_before_test()
        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        print(f"sending tcp requests to: {host}:{port}")

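        # Send batches of 20 requests and count the distinct endpoints that respond;
        # with load balancing in place, all 3 backend pods should eventually answer.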
        endpoints = {}
        retry = 0
        while len(endpoints) != 3 and retry <= 30:
            for i in range(20):
                client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                client.connect((host, port))
                client.sendall(b'connect')
                response = client.recv(4096)
                endpoint = response.decode()
                print(f' req number {i}; response: {endpoint}')
                if endpoint not in endpoints:
                    endpoints[endpoint] = 1
                else:
                    endpoints[endpoint] += 1
                client.close()
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert len(endpoints) == 3

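        # Cross-check: every endpoint that responded must appear as an upstream
        # server in the generated nginx config.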
        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1,
            transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace
        )

        pattern = 'server .*;'
        servers = re.findall(pattern, result_conf)
        for key in endpoints:
            assert any(key in server for server in servers)

    def test_tcp_request_load_balanced_multiple(
            self, kube_apis, crd_ingress_controller, transport_server_setup
    ):
        """
        A second TransportServer that reuses the listener of an existing TransportServer should be rejected,
        and should become valid once the original TransportServer is deleted.
        """
        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        # Step 1, confirm load balancing is working.
        print(f"sending tcp requests to: {host}:{port}")
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect((host, port))
        client.sendall(b'connect')
        response = client.recv(4096)
        endpoint = response.decode()
        print(f'response: {endpoint}')
        client.close()
        assert endpoint != ""

        # Step 2, add a second TransportServer with the same port and confirm the collision
        transport_server_file = f"{TEST_DATA}/transport-server-tcp-load-balance/second-transport-server.yaml"
        ts_resource = create_ts_from_yaml(
            kube_apis.custom_objects, transport_server_file, transport_server_setup.namespace
        )
        wait_before_test()

        second_ts_name = ts_resource['metadata']['name']
        response = read_ts(
            kube_apis.custom_objects,
            transport_server_setup.namespace,
            second_ts_name,
        )
        assert (
                response["status"]
                and response["status"]["reason"] == "Rejected"
                and response["status"]["state"] == "Warning"
                and response["status"]["message"] == "Listener tcp-server is taken by another resource"
        )

        # Step 3, remove the default TransportServer with the same port
        delete_ts(kube_apis.custom_objects, transport_server_setup.resource, transport_server_setup.namespace)

        wait_before_test()
        response = read_ts(
            kube_apis.custom_objects,
            transport_server_setup.namespace,
            second_ts_name,
        )
        assert (
                response["status"]
                and response["status"]["reason"] == "AddedOrUpdated"
                and response["status"]["state"] == "Valid"
        )

        # Step 4, confirm load balancing is still working.
        print(f"sending tcp requests to: {host}:{port}")
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect((host, port))
        client.sendall(b'connect')
        response = client.recv(4096)
        endpoint = response.decode()
        print(f'response: {endpoint}')
        client.close()
        assert endpoint != ""

        # cleanup
        delete_ts(kube_apis.custom_objects, ts_resource, transport_server_setup.namespace)
        transport_server_file = f"{TEST_DATA}/transport-server-tcp-load-balance/standard/transport-server.yaml"
        create_ts_from_yaml(
            kube_apis.custom_objects, transport_server_file, transport_server_setup.namespace
        )
        wait_before_test()

    def test_tcp_request_load_balanced_wrong_port(
            self, kube_apis, crd_ingress_controller, transport_server_setup
    ):
        """
        Requests to a TransportServer whose upstream points to the wrong port should fail with a reset connection.
        """

        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/wrong-port-transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )

        wait_before_test()

        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

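        # The patched upstream port is not served by any pod, so NGINX is expected
        # to reset these connections.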
        print(f"sending tcp requests to: {host}:{port}")
        for i in range(3):
            try:
                client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                client.connect((host, port))
                client.sendall(b'connect')
            except ConnectionResetError as E:
                print("The expected exception occurred:", E)

        self.restore_ts(kube_apis, transport_server_setup)

    def test_tcp_request_load_balanced_missing_service(
            self, kube_apis, crd_ingress_controller, transport_server_setup
    ):
        """
        Requests to a TransportServer that references a missing service should fail with a reset connection.
        """

        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/missing-service-transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )

        wait_before_test()

        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

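        # The referenced service does not exist, so NGINX is expected to reset these connections.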
        print(f"sending tcp requests to: {host}:{port}")
        for i in range(3):
            try:
                client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                client.connect((host, port))
                client.sendall(b'connect')
            except ConnectionResetError as E:
                print("The expected exception occurred:", E)

        self.restore_ts(kube_apis, transport_server_setup)

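    # Helper for the max connections test: sends 'hold' so the backend presumably keeps
    # the connection open, letting the test hold several connections at once.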
    def make_holding_connection(self, host, port):
        print(f"sending tcp requests to: {host}:{port}")
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect((host, port))
        client.sendall(b'hold')
        response = client.recv(4096)
        endpoint = response.decode()
        print(f'response: {endpoint}')
        return client

    def test_tcp_request_max_connections(
            self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        The maxConns setting should limit the number of open TCP connections.
        3 replicas with a max of 2 connections each allow 6, so making the 7th connection will fail.
        """

        # step 1 - set max connections to 2 for each upstream server
        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/max-connections-transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        wait_before_test()
        configs = 0
        retry = 0
        while configs != 3 and retry <= 30:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1,
                transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace
            )

            pattern = 'max_conns=2'
            configs = len(re.findall(pattern, result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert configs == 3

        # step 2 - open the maximum number of allowed connections
        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

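        # 3 upstream servers with max_conns=2 each allow 6 concurrently held connections.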
        clients = []
        for i in range(6):
            c = self.make_holding_connection(host, port)
            clients.append(c)

        # step 3 - assert the next connection fails
        try:
            c = self.make_holding_connection(host, port)
            # making a connection should fail and throw an exception
            assert c is None
        except ConnectionResetError as E:
            print("The expected exception occurred:", E)

        for c in clients:
            c.close()

        # step 4 - revert to config with no max connections
        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/standard/transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        wait_before_test()

        # step 5 - confirm making lots of connections doesn't cause an error
        clients = []
        for i in range(24):
            c = self.make_holding_connection(host, port)
            clients.append(c)

        for c in clients:
            c.close()

    def test_tcp_request_load_balanced_method(
            self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Update the load balancing method to 'hash'. This sends requests to a specific pod based on its IP,
        resulting in a single endpoint handling all the requests.
        """

        # Step 1 - set the load balancing method.

        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/method-transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        wait_before_test()
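        # Poll until the regenerated config again lists all 3 upstream servers.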
        num_servers = 0
        retry = 0
        while num_servers != 3 and retry <= 30:
            result_conf = get_ts_nginx_template_conf(
                kube_apis.v1,
                transport_server_setup.namespace,
                transport_server_setup.name,
                transport_server_setup.ingress_pod_name,
                ingress_controller_prerequisites.namespace
            )

            pattern = 'server .*;'
            num_servers = len(re.findall(pattern, result_conf))
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert num_servers == 3

        # Step 2 - confirm all requests go to the same endpoint.

        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip
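        # With the 'hash' method, repeated requests from the same client should all
        # land on a single endpoint.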
        endpoints = {}
        retry = 0
        while len(endpoints) != 1 and retry <= 30:
            for i in range(20):
                client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                client.connect((host, port))
                client.sendall(b'connect')
                response = client.recv(4096)
                endpoint = response.decode()
                print(f' req number {i}; response: {endpoint}')
                if endpoint not in endpoints:
                    endpoints[endpoint] = 1
                else:
                    endpoints[endpoint] += 1
                client.close()
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert len(endpoints) == 1

        # Step 3 - restore to default load balancing method and confirm requests are balanced.

        self.restore_ts(kube_apis, transport_server_setup)
        wait_before_test()

        endpoints = {}
        retry = 0
        while len(endpoints) != 3 and retry <= 30:
            for i in range(20):
                client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                client.connect((host, port))
                client.sendall(b'connect')
                response = client.recv(4096)
                endpoint = response.decode()
                print(f' req number {i}; response: {endpoint}')
                if endpoint not in endpoints:
                    endpoints[endpoint] = 1
                else:
                    endpoints[endpoint] += 1
                client.close()
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")

        assert len(endpoints) == 3

    @pytest.mark.skip_for_nginx_oss
    def test_tcp_passing_healthcheck_with_match(
            self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Configure a passing health check and check that all backend pods return responses.
        """

        # Step 1 - configure a passing health check

        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/passing-hc-transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        # 4s includes 3s timeout for a health check to fail in case of a connection timeout to a backend pod
        wait_before_test(4)

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1,
            transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace
        )

        match = f"match_ts_{transport_server_setup.namespace}_transport-server_tcp-app"

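        # The generated stream config should contain the health_check directive and
        # reference the generated match block.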
        assert "health_check interval=5s port=3333" in result_conf
        assert f"passes=1 jitter=0s fails=1 match={match}" in result_conf
        assert "health_check_timeout 3s;" in result_conf
        assert 'send "health"' in result_conf
        assert 'expect  "healthy"' in result_conf

        # Step 2 - confirm load balancing works

        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

        endpoints = {}
        retry = 0
        while len(endpoints) != 3 and retry <= 30:
            for i in range(20):
                client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                client.connect((host, port))
                client.sendall(b'connect')
                response = client.recv(4096)
                endpoint = response.decode()
                print(f' req number {i}; response: {endpoint}')
                if endpoint not in endpoints:
                    endpoints[endpoint] = 1
                else:
                    endpoints[endpoint] += 1
                client.close()
            retry += 1
            wait_before_test(1)
            print(f"Retry #{retry}")
        assert len(endpoints) == 3

        # Step 3 - restore

        self.restore_ts(kube_apis, transport_server_setup)

    @pytest.mark.skip_for_nginx_oss
    def test_tcp_failing_healthcheck_with_match(
            self, kube_apis, crd_ingress_controller, transport_server_setup, ingress_controller_prerequisites
    ):
        """
        Configure a failing health check and check that NGINX Plus resets connections.
        """

        # Step 1 - configure a failing health check

        patch_src = f"{TEST_DATA}/transport-server-tcp-load-balance/failing-hc-transport-server.yaml"
        patch_ts(
            kube_apis.custom_objects,
            transport_server_setup.name,
            patch_src,
            transport_server_setup.namespace,
        )
        # 4s includes 3s timeout for a health check to fail in case of a connection timeout to a backend pod
        wait_before_test(4)

        result_conf = get_ts_nginx_template_conf(
            kube_apis.v1,
            transport_server_setup.namespace,
            transport_server_setup.name,
            transport_server_setup.ingress_pod_name,
            ingress_controller_prerequisites.namespace
        )

        match = f"match_ts_{transport_server_setup.namespace}_transport-server_tcp-app"

        assert "health_check interval=5s port=3333" in result_conf
        assert f"passes=1 jitter=0s fails=1 match={match}" in result_conf
        assert "health_check_timeout 3s" in result_conf
        assert 'send "health"' in result_conf
        assert 'expect  "unmatched"' in result_conf

        # Step 2 - confirm load balancing doesn't work

        port = transport_server_setup.public_endpoint.tcp_server_port
        host = transport_server_setup.public_endpoint.public_ip

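        # With every upstream failing the health check, NGINX Plus should reset the connection.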
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect((host, port))
        client.sendall(b'connect')

        try:
            client.recv(4096)  # must raise ConnectionResetError
            client.close()
            pytest.fail("We expected an error here, but didn't get it. Exiting...")
        except ConnectionResetError as ex:
            # expected error
            print(f"There was an expected exception {str(ex)}")

        # Step 3 - restore

        self.restore_ts(kube_apis, transport_server_setup)