github.com/joelanford/operator-sdk@v0.8.2/internal/pkg/scorecard/resource_handler.go

// Copyright 2019 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scorecard

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"time"

	"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
	proxyConf "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubeconfig"
	"github.com/operator-framework/operator-sdk/pkg/k8sutil"

	"github.com/ghodss/yaml"
	"github.com/spf13/viper"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

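// cleanupFn is a cleanup callback registered via addResourceCleanup and executed,
// most recent first, by cleanupScorecard.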
type cleanupFn func() error

// waitUntilCRStatusExists waits until the status block of the CR currently being tested exists. If the timeout
// is reached, it simply continues and assumes there is no status block
func waitUntilCRStatusExists(cr *unstructured.Unstructured) error {
	err := wait.Poll(time.Second*1, time.Second*time.Duration(viper.GetInt(InitTimeoutOpt)), func() (bool, error) {
		err := runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: cr.GetNamespace(), Name: cr.GetName()}, cr)
		if err != nil {
			return false, fmt.Errorf("error getting custom resource: %v", err)
		}
		if cr.Object["status"] != nil {
			return true, nil
		}
		return false, nil
	})
	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}
	return nil
}

// yamlToUnstructured decodes a yaml file into an unstructured object
func yamlToUnstructured(yamlPath string) (*unstructured.Unstructured, error) {
	yamlFile, err := ioutil.ReadFile(yamlPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read file %s: %v", yamlPath, err)
	}
	if bytes.Contains(yamlFile, []byte("\n---\n")) {
		return nil, fmt.Errorf("custom resource manifest cannot have more than 1 resource")
	}
	obj := &unstructured.Unstructured{}
	jsonSpec, err := yaml.YAMLToJSON(yamlFile)
	if err != nil {
		return nil, fmt.Errorf("could not convert yaml file to json: %v", err)
	}
	if err := obj.UnmarshalJSON(jsonSpec); err != nil {
		return nil, fmt.Errorf("failed to unmarshal custom resource manifest to unstructured: %s", err)
	}
	// set the namespace
	obj.SetNamespace(viper.GetString(NamespaceOpt))
	return obj, nil
}

// createFromYAMLFile takes the path of a YAML manifest and creates the resources it contains. If it finds a
// deployment, it adds the scorecard proxy as a container in the deployment's podspec.
func createFromYAMLFile(yamlPath string) error {
	yamlSpecs, err := ioutil.ReadFile(yamlPath)
	if err != nil {
		return fmt.Errorf("failed to read file %s: %v", yamlPath, err)
	}
	scanner := yamlutil.NewYAMLScanner(yamlSpecs)
	for scanner.Scan() {
		obj := &unstructured.Unstructured{}
		jsonSpec, err := yaml.YAMLToJSON(scanner.Bytes())
		if err != nil {
			return fmt.Errorf("could not convert yaml file to json: %v", err)
		}
		if err := obj.UnmarshalJSON(jsonSpec); err != nil {
			return fmt.Errorf("could not unmarshal resource spec: %v", err)
		}
		obj.SetNamespace(viper.GetString(NamespaceOpt))

		// dirty hack to merge scorecard proxy into operator deployment; lots of serialization and deserialization
		if obj.GetKind() == "Deployment" {
			// TODO: support multiple deployments
			if deploymentName != "" {
				return fmt.Errorf("scorecard currently does not support multiple deployments in the manifests")
			}
			dep, err := unstructuredToDeployment(obj)
			if err != nil {
				return fmt.Errorf("failed to convert object to deployment: %v", err)
			}
			deploymentName = dep.GetName()
			err = createKubeconfigSecret()
			if err != nil {
				return fmt.Errorf("failed to create kubeconfig secret for scorecard-proxy: %v", err)
			}
			addMountKubeconfigSecret(dep)
			addProxyContainer(dep)
			// go back to unstructured to create
			obj, err = deploymentToUnstructured(dep)
			if err != nil {
				return fmt.Errorf("failed to convert deployment to unstructured: %v", err)
			}
		}
		err = runtimeClient.Create(context.TODO(), obj)
		if err != nil {
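			// the create failed; if a REST mapping for this kind is already known, the failure was not
			// caused by a missing mapping (e.g. a CRD created earlier in this run), so return the error as-is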
			_, restErr := restMapper.RESTMappings(obj.GetObjectKind().GroupVersionKind().GroupKind())
			if restErr == nil {
				return err
			}
			// don't store the error from the poll below, as it can only be a timeout; the error from the
			// runtime client is easier for the user to understand, so use that if the retried create fails
			_ = wait.PollImmediate(time.Second*1, time.Second*10, func() (bool, error) {
				restMapper.Reset()
				_, err := restMapper.RESTMappings(obj.GetObjectKind().GroupVersionKind().GroupKind())
				if err != nil {
					return false, nil
				}
				return true, nil
			})
			err = runtimeClient.Create(context.TODO(), obj)
			if err != nil {
				return err
			}
		}
		addResourceCleanup(obj, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()})
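		// record the pod backing the proxy-injected deployment so the scorecard can retrieve proxy logs later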
		if obj.GetKind() == "Deployment" {
			proxyPodGlobal, err = getPodFromDeployment(deploymentName, viper.GetString(NamespaceOpt))
			if err != nil {
				return err
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("failed to scan %s: (%v)", yamlPath, err)
	}

	return nil
}

// getPodFromDeployment returns the pod belonging to the deployment depName in namespace.
func getPodFromDeployment(depName, namespace string) (pod *v1.Pod, err error) {
	dep := &appsv1.Deployment{}
	err = runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: depName}, dep)
	if err != nil {
		return nil, fmt.Errorf("failed to get newly created deployment: %v", err)
	}
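	// use the deployment's label selector to find the pod it manages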
	set := labels.Set(dep.Spec.Selector.MatchLabels)
	// In some cases, the pod from the old deployment will be picked up
	// instead of the new one.
	err = wait.PollImmediate(time.Second*1, time.Second*60, func() (bool, error) {
		pods := &v1.PodList{}
		err = runtimeClient.List(context.TODO(), &client.ListOptions{LabelSelector: set.AsSelector()}, pods)
		if err != nil {
			return false, fmt.Errorf("failed to get list of pods in deployment: %v", err)
		}
		// Make sure the pods exist. There should only be 1 pod per deployment.
		if len(pods.Items) == 1 {
			// If the pod has a deletion timestamp, it is the old pod; wait for
			// pod with no deletion timestamp
			if pods.Items[0].GetDeletionTimestamp() == nil {
				pod = &pods.Items[0]
				return true, nil
			}
		} else {
			log.Debug("Operator deployment does not have exactly 1 pod")
		}
		return false, nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get proxyPod: %s", err)
	}
	return pod, nil
}

// createKubeconfigSecret creates the secret that will be mounted in the operator's container and contains
// the kubeconfig for communicating with the proxy
func createKubeconfigSecret() error {
	kubeconfigMap := make(map[string][]byte)
	kc, err := proxyConf.Create(metav1.OwnerReference{Name: "scorecard"}, "http://localhost:8889", viper.GetString(NamespaceOpt))
	if err != nil {
		return err
	}
	defer func() {
		if err := os.Remove(kc.Name()); err != nil {
			log.Errorf("Failed to delete generated kubeconfig file: (%v)", err)
		}
	}()
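	// proxyConf.Create wrote the generated kubeconfig to a file; reopen it to read its contents into the secret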
	kc, err = os.Open(kc.Name())
	if err != nil {
		return err
	}
	kcBytes, err := ioutil.ReadAll(kc)
	if err != nil {
		return err
	}
	kubeconfigMap["kubeconfig"] = kcBytes
	kubeconfigSecret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "scorecard-kubeconfig",
			Namespace: viper.GetString(NamespaceOpt),
		},
		Data: kubeconfigMap,
	}
	err = runtimeClient.Create(context.TODO(), kubeconfigSecret)
	if err != nil {
		return err
	}
	addResourceCleanup(kubeconfigSecret, types.NamespacedName{Namespace: kubeconfigSecret.GetNamespace(), Name: kubeconfigSecret.GetName()})
	return nil
}

// addMountKubeconfigSecret creates the volume mount for the kubeconfig secret
func addMountKubeconfigSecret(dep *appsv1.Deployment) {
	// create mount for secret
	dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, v1.Volume{
		Name: "scorecard-kubeconfig",
		VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{
			SecretName: "scorecard-kubeconfig",
			Items: []v1.KeyToPath{{
				Key:  "kubeconfig",
				Path: "config",
			}},
		},
		},
	})
	for index := range dep.Spec.Template.Spec.Containers {
		// mount the volume
		dep.Spec.Template.Spec.Containers[index].VolumeMounts = append(dep.Spec.Template.Spec.Containers[index].VolumeMounts, v1.VolumeMount{
			Name:      "scorecard-kubeconfig",
			MountPath: "/scorecard-secret",
		})
		// specify the path via KUBECONFIG env var
		dep.Spec.Template.Spec.Containers[index].Env = append(dep.Spec.Template.Spec.Containers[index].Env, v1.EnvVar{
			Name:  "KUBECONFIG",
			Value: "/scorecard-secret/config",
		})
	}
}

// addProxyContainer adds the container spec for the scorecard-proxy to the deployment's podspec
func addProxyContainer(dep *appsv1.Deployment) {
	pullPolicyString := viper.GetString(ProxyPullPolicyOpt)
	var pullPolicy v1.PullPolicy
	switch pullPolicyString {
	case "Always":
		pullPolicy = v1.PullAlways
	case "Never":
		pullPolicy = v1.PullNever
	case "PullIfNotPresent":
		pullPolicy = v1.PullIfNotPresent
	default:
		// this case shouldn't happen since we check the values in scorecard.go, but just in case, default to Always to prevent errors
		pullPolicy = v1.PullAlways
	}
	dep.Spec.Template.Spec.Containers = append(dep.Spec.Template.Spec.Containers, v1.Container{
		Name:            scorecardContainerName,
		Image:           viper.GetString(ProxyImageOpt),
		ImagePullPolicy: pullPolicy,
		Command:         []string{"scorecard-proxy"},
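		// expose the pod's own namespace to the proxy via the downward API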
		Env: []v1.EnvVar{{
			Name:      k8sutil.WatchNamespaceEnvVar,
			ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
		}},
	})
}

// unstructuredToDeployment converts an unstructured object to a deployment
func unstructuredToDeployment(obj *unstructured.Unstructured) (*appsv1.Deployment, error) {
	jsonByte, err := obj.MarshalJSON()
	if err != nil {
		return nil, fmt.Errorf("failed to convert deployment to json: %v", err)
	}
	depObj, _, err := dynamicDecoder.Decode(jsonByte, nil, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to decode deployment object: %v", err)
	}
	switch o := depObj.(type) {
	case *appsv1.Deployment:
		return o, nil
	default:
		return nil, fmt.Errorf("conversion of runtime object to deployment failed (resulting runtime object not deployment type)")
	}
}

// deploymentToUnstructured converts a deployment to an unstructured object
func deploymentToUnstructured(dep *appsv1.Deployment) (*unstructured.Unstructured, error) {
	jsonByte, err := json.Marshal(dep)
	if err != nil {
		return nil, fmt.Errorf("failed to remarshal deployment: %v", err)
	}
	obj := &unstructured.Unstructured{}
	err = obj.UnmarshalJSON(jsonByte)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal updated deployment: %v", err)
	}
	return obj, nil
}

// cleanupScorecard runs all cleanup functions in reverse order
func cleanupScorecard() error {
	failed := false
	for i := len(cleanupFns) - 1; i >= 0; i-- {
		err := cleanupFns[i]()
		if err != nil {
			failed = true
			log.Printf("a cleanup function failed with error: %v\n", err)
		}
	}
	if failed {
		return fmt.Errorf("a cleanup function failed; see stdout for more details")
	}
	return nil
}

// addResourceCleanup adds a cleanup function for the specified runtime object
func addResourceCleanup(obj runtime.Object, key types.NamespacedName) {
	cleanupFns = append(cleanupFns, func() error {
		// make a copy of the object because the client changes it
		objCopy := obj.DeepCopyObject()
		err := runtimeClient.Delete(context.TODO(), obj)
		if err != nil && !apierrors.IsNotFound(err) {
			return err
		}
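		// wait for the resource to be fully deleted (or time out after 10 seconds)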
		err = wait.PollImmediate(time.Second*1, time.Second*10, func() (bool, error) {
			err = runtimeClient.Get(context.TODO(), key, objCopy)
			if err != nil {
				if apierrors.IsNotFound(err) {
					return true, nil
				}
				return false, fmt.Errorf("error encountered during deletion of resource type %v with namespace/name (%+v): %v", objCopy.GetObjectKind().GroupVersionKind().Kind, key, err)
			}
			return false, nil
		})
		if err != nil {
			return fmt.Errorf("cleanup function failed: %v", err)
		}
		return nil
	})
}

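// getProxyLogs returns the logs of the scorecard proxy container running in proxyPod.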
func getProxyLogs(proxyPod *v1.Pod) (string, error) {
	// need a standard kubeclient for pod logs
	kubeclient, err := kubernetes.NewForConfig(kubeconfig)
	if err != nil {
		return "", fmt.Errorf("failed to create kubeclient: %v", err)
	}
	logOpts := &v1.PodLogOptions{Container: scorecardContainerName}
	req := kubeclient.CoreV1().Pods(proxyPod.GetNamespace()).GetLogs(proxyPod.GetName(), logOpts)
	readCloser, err := req.Stream()
	if err != nil {
		return "", fmt.Errorf("failed to get logs: %v", err)
	}
	defer readCloser.Close()
	buf := new(bytes.Buffer)
	_, err = buf.ReadFrom(readCloser)
	if err != nil {
		return "", fmt.Errorf("test failed and failed to read pod logs: %v", err)
	}
	return buf.String(), nil
}