github.com/kubeshop/testkube@v1.17.23/pkg/k8sclient/k8sclient.go

package k8sclient

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path"
	"time"

	"github.com/pkg/errors"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/transport/spdy"

	corev1 "k8s.io/api/core/v1"
	networkv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/portforward"
)

const (
	apiServerDeploymentSelector = "app.kubernetes.io/name=api-server"
	operatorDeploymentSelector  = "control-plane=controller-manager"
)

// ConnectToK8s establishes a connection to the Kubernetes cluster and returns a *kubernetes.Clientset
func ConnectToK8s() (*kubernetes.Clientset, error) {
	config, err := GetK8sClientConfig()
	if err != nil {
		return nil, err
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	return clientset, nil
}

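// Usage sketch (not part of the original file): connect to the cluster and
// list pods in a namespace. The "default" namespace is a placeholder chosen
// for illustration only.
func exampleConnectAndListPods() error {
	clientset, err := ConnectToK8s()
	if err != nil {
		return err
	}

	pods, err := clientset.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}

	fmt.Printf("found %d pods\n", len(pods.Items))
	return nil
}
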
// ConnectToK8sDynamic establishes a connection to the Kubernetes cluster and returns a dynamic.Interface
func ConnectToK8sDynamic() (dynamic.Interface, error) {
	config, err := GetK8sClientConfig()
	if err != nil {
		return nil, err
	}

	clientset, err := dynamic.NewForConfig(config)
	if err != nil {
		return nil, err
	}

	return clientset, nil
}

// GetK8sClientConfig builds a Kubernetes client configuration, preferring the
// KUBECONFIG environment variable, then ~/.kube/config, and finally the
// in-cluster configuration.
func GetK8sClientConfig() (*rest.Config, error) {
	var err error
	var config *rest.Config
	k8sConfigExists := false
	homeDir, _ := os.UserHomeDir()
	kubeConfigPath := path.Join(homeDir, ".kube/config")

	if _, err = os.Stat(kubeConfigPath); err == nil {
		k8sConfigExists = true
	}

	if cfg, exists := os.LookupEnv("KUBECONFIG"); exists {
		config, err = clientcmd.BuildConfigFromFlags("", cfg)
	} else if k8sConfigExists {
		config, err = clientcmd.BuildConfigFromFlags("", kubeConfigPath)
	} else {
		config, err = rest.InClusterConfig()
		if err == nil {
			config.QPS = 40.0
			config.Burst = 400
		}
	}

	if err != nil {
		return nil, err
	}

	return config, nil
}

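// Usage sketch (not part of the original file): point the client at an
// explicit kubeconfig file via the KUBECONFIG environment variable before
// building the configuration. The file path is a placeholder.
func exampleConfigFromCustomKubeconfig() (*rest.Config, error) {
	if err := os.Setenv("KUBECONFIG", "/tmp/custom-kubeconfig"); err != nil {
		return nil, err
	}
	return GetK8sClientConfig()
}
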
// GetIngressAddress gets the hostname or IP address of the named ingress.
func GetIngressAddress(clientSet kubernetes.Interface, ingressName string, namespace string) (string, error) {
	period := 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), period)
	defer cancel()

	var ingress *networkv1.Ingress
	var err error

	processDone := make(chan bool, 1)
	go func() {
		ingressCount := 0
		for ingressCount == 0 {
			// Stop retrying once the surrounding context has timed out or
			// been cancelled, so the goroutine does not leak.
			if ctx.Err() != nil {
				return
			}
			ingress, err = clientSet.NetworkingV1().Ingresses(namespace).Get(ctx, ingressName, metav1.GetOptions{})
			if err == nil {
				ingressCount = len(ingress.Status.LoadBalancer.Ingress)
			}
			time.Sleep(time.Second)
		}
		processDone <- true
	}()

	select {
	case <-ctx.Done():
		err = fmt.Errorf("getting ingress failed with timeout (%s), previous err: %w", period, err)
	case <-processDone:
	}

	if err != nil {
		return "", err
	}

	address := ingress.Status.LoadBalancer.Ingress[0].Hostname
	if len(address) == 0 {
		address = ingress.Status.LoadBalancer.Ingress[0].IP
	}

	return address, nil
}

// IsPersistentVolumeClaimBound returns a condition function that checks
// whether the named PersistentVolumeClaim is bound.
func IsPersistentVolumeClaimBound(c kubernetes.Interface, pvcName, namespace string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		pvc, err := c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		switch pvc.Status.Phase {
		case corev1.ClaimBound:
			return true, nil
		case corev1.ClaimLost:
			return false, nil
		}
		return false, nil
	}
}

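// Usage sketch (not part of the original file): poll until a claim is bound,
// giving up after two minutes. The claim name and namespace are placeholders.
func exampleWaitForClaimBound(clientset kubernetes.Interface) error {
	ctx := context.Background()
	return wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, true,
		IsPersistentVolumeClaimBound(clientset, "artifacts-pvc", "testkube"))
}
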
// IsPodRunning returns a condition function that checks whether the pod in
// question has reached the Running (or Succeeded) phase.
func IsPodRunning(c kubernetes.Interface, podName, namespace string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		switch pod.Status.Phase {
		case corev1.PodRunning, corev1.PodSucceeded:
			return true, nil
		case corev1.PodFailed:
			return false, nil
		}
		return false, nil
	}
}

// HasPodSucceeded returns a condition function that checks whether the pod has
// succeeded (a PodFailed phase also ends the wait, without success).
func HasPodSucceeded(c kubernetes.Interface, podName, namespace string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		switch pod.Status.Phase {
		case corev1.PodSucceeded:
			return true, nil
		case corev1.PodFailed:
			return false, nil
		}
		return false, nil
	}
}

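// Usage sketch (not part of the original file): wait for a one-shot pod (for
// example, a job-style test runner) to finish. The pod name and namespace are
// placeholders.
func exampleWaitForPodCompletion(clientset kubernetes.Interface) error {
	ctx := context.Background()
	return wait.PollUntilContextTimeout(ctx, time.Second, 10*time.Minute, true,
		HasPodSucceeded(clientset, "test-runner-pod", "testkube"))
}
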
// IsPodReady returns a condition function that checks whether all containers
// of the pod in question report ready.
func IsPodReady(c kubernetes.Interface, podName, namespace string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
		if err != nil {
			// Treat lookup errors as "not ready yet" so polling continues.
			return false, nil
		}
		if len(pod.Status.ContainerStatuses) == 0 {
			return false, nil
		}

		for _, cs := range pod.Status.ContainerStatuses {
			if !cs.Ready {
				return false, nil
			}
		}
		return true, nil
	}
}

// WaitForPodsReady waits for all pods of the given instance to be running and
// ready, returning an error if the timeout is reached.
func WaitForPodsReady(k8sClient kubernetes.Interface, namespace string, instance string, timeout time.Duration) error {
	ctx := context.TODO()
	pods, err := k8sClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: "app.kubernetes.io/instance=" + instance})
	if err != nil {
		return err
	}

	for _, pod := range pods.Items {
		if err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, IsPodRunning(k8sClient, pod.Name, namespace)); err != nil {
			return err
		}
		if err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, IsPodReady(k8sClient, pod.Name, namespace)); err != nil {
			return err
		}
	}
	return nil
}

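// Usage sketch (not part of the original file): block until every pod of a
// Helm release (matched via the app.kubernetes.io/instance label) is running
// and ready. The release name and namespace are placeholders.
func exampleWaitForRelease(clientset kubernetes.Interface) error {
	return WaitForPodsReady(clientset, "testkube", "testkube", 5*time.Minute)
}
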
// GetClusterVersion returns the current version of the Kubernetes cluster
func GetClusterVersion(k8sClient kubernetes.Interface) (string, error) {
	version, err := k8sClient.Discovery().ServerVersion()
	if err != nil {
		return "", err
	}

	return version.String(), nil
}

// GetAPIServerLogs returns the latest logs from the API server deployment
func GetAPIServerLogs(ctx context.Context, k8sClient kubernetes.Interface, namespace string) ([]string, error) {
	return GetPodLogs(ctx, k8sClient, namespace, apiServerDeploymentSelector)
}

// GetOperatorLogs returns the logs from the operator
func GetOperatorLogs(ctx context.Context, k8sClient kubernetes.Interface, namespace string) ([]string, error) {
	return GetPodLogs(ctx, k8sClient, namespace, operatorDeploymentSelector)
}

// GetPodLogs returns logs for pods specified by the label selector
func GetPodLogs(ctx context.Context, k8sClient kubernetes.Interface, namespace string, selector string) ([]string, error) {
	pods, err := k8sClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: selector,
	})
	if err != nil {
		return []string{}, fmt.Errorf("could not list pods for selector %q: %w", selector, err)
	}

	logs := []string{}

	for _, pod := range pods.Items {
		for _, container := range pod.Spec.Containers {
			podLogs, err := k8sClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
				Container: container.Name,
			}).Stream(ctx)
			if err != nil {
				return []string{}, fmt.Errorf("could not stream logs for pod %s container %s: %w", pod.Name, container.Name, err)
			}

			buf := new(bytes.Buffer)
			_, err = io.Copy(buf, podLogs)
			// Close the stream inside the loop instead of deferring, so log
			// streams are not held open until the function returns.
			podLogs.Close()
			if err != nil {
				return []string{}, fmt.Errorf("could not copy pod logs to buffer: %w", err)
			}
			logs = append(logs, fmt.Sprintf("Pod: %s \n Logs: \n %s", pod.Name, buf.String()))
		}
	}
	return logs, nil
}

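// Usage sketch (not part of the original file): dump the Testkube API server
// logs for troubleshooting. The namespace is a placeholder.
func exampleDumpAPIServerLogs(ctx context.Context, clientset kubernetes.Interface) error {
	logs, err := GetAPIServerLogs(ctx, clientset, "testkube")
	if err != nil {
		return err
	}
	for _, l := range logs {
		fmt.Println(l)
	}
	return nil
}
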
// PortForward forwards a local port to the given service port of the first
// running pod that backs the service.
func PortForward(ctx context.Context, namespace, serviceName string, servicePort, localhostPort int, verbose bool) error {
	clientSet, err := ConnectToK8s()
	if err != nil {
		return err
	}
	svc, err := clientSet.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	var podPort intstr.IntOrString
	for _, port := range svc.Spec.Ports {
		if port.Port == int32(servicePort) {
			podPort = port.TargetPort
			break
		}
	}

	pods, err := clientSet.
		CoreV1().
		Pods(namespace).
		List(ctx, metav1.ListOptions{
			LabelSelector: labels.SelectorFromSet(labels.Set(svc.Spec.Selector)).String(),
		})
	if err != nil {
		return err
	}

	var servicePod *corev1.Pod
	for i := range pods.Items {
		if pods.Items[i].Status.Phase != corev1.PodRunning {
			continue
		}
		servicePod = &pods.Items[i]
		break
	}

	if servicePod == nil {
		return fmt.Errorf("no running pods found for service %s/%s", namespace, serviceName)
	}

	var podPortNumber int32
	for _, c := range servicePod.Spec.Containers {
		for _, p := range c.Ports {
			if p.ContainerPort == podPort.IntVal || p.Name == podPort.StrVal {
				podPortNumber = p.ContainerPort
				break
			}
		}
	}

	restConfig, err := GetK8sClientConfig()
	if err != nil {
		return err
	}

	transport, upgrader, err := spdy.RoundTripperFor(restConfig)
	if err != nil {
		return errors.Wrap(err, "create round tripper")
	}

	readyChan := make(chan struct{})

	url := clientSet.
		CoreV1().
		RESTClient().
		Post().
		Resource("pods").
		Namespace(namespace).
		Name(servicePod.Name).
		SubResource("portforward").
		URL()

	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, url)
	var out io.Writer = os.Stdout
	if !verbose {
		out = nil
	}
	forwarder, err := portforward.New(dialer, []string{fmt.Sprintf("%d:%d", localhostPort, podPortNumber)}, ctx.Done(), readyChan, out, os.Stderr)
	if err != nil {
		return errors.Wrap(err, "create port forwarder")
	}

	go func() {
		if err := forwarder.ForwardPorts(); err != nil {
			log.Printf("port forwarding failed: %v", err)
		}
	}()
	<-readyChan
	return nil
}

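// Usage sketch (not part of the original file): expose the Testkube API
// service on localhost:8088 for the lifetime of the context. Service name,
// namespace, and ports are placeholders.
func exampleForwardAPIService(ctx context.Context) error {
	if err := PortForward(ctx, "testkube", "testkube-api-server", 8088, 8088, false); err != nil {
		return err
	}
	// The forward runs in the background; cancelling ctx stops it.
	fmt.Println("forwarding localhost:8088 -> testkube-api-server:8088")
	return nil
}
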
// IsPodOfServiceRunning returns true if at least one pod backing the given
// service is in the Running phase.
func IsPodOfServiceRunning(ctx context.Context, namespace, serviceName string) (bool, error) {
	clientSet, err := ConnectToK8s()
	if err != nil {
		return false, err
	}

	svc, err := clientSet.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	pods, err := clientSet.
		CoreV1().
		Pods(namespace).
		List(ctx, metav1.ListOptions{
			LabelSelector: labels.SelectorFromSet(labels.Set(svc.Spec.Selector)).String(),
		})
	if err != nil {
		return false, err
	}

	for _, pod := range pods.Items {
		if pod.Status.Phase == corev1.PodRunning {
			return true, nil
		}
	}
	return false, nil
}
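
// Usage sketch (not part of the original file): check whether the Testkube
// API service has a running pod, for example before attempting a port
// forward. Service name and namespace are placeholders.
func exampleCheckServiceRunning(ctx context.Context) error {
	running, err := IsPodOfServiceRunning(ctx, "testkube", "testkube-api-server")
	if err != nil {
		return err
	}
	fmt.Printf("testkube-api-server running: %t\n", running)
	return nil
}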