github.com/munnerz/test-infra@v0.0.0-20190108210205-ce3d181dc989/prow/cmd/build/main.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

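// Command build watches ProwJobs and drives the corresponding knative-build
// Builds in one or more clusters. A typical invocation looks roughly like the
// following (a sketch only; the exact flags depend on your deployment, see the
// flag definitions in options.parse below):
//
//	build --config=prow/config.yaml --kubeconfig=$HOME/.kube/config --all-contexts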
package main

import (
	"errors"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"os/signal"
	"syscall"
	"time"

	prowjobset "k8s.io/test-infra/prow/client/clientset/versioned"
	prowjobinfo "k8s.io/test-infra/prow/client/informers/externalversions"
	"k8s.io/test-infra/prow/config"
	"k8s.io/test-infra/prow/kube"
	"k8s.io/test-infra/prow/logrusutil"

	buildset "github.com/knative/build/pkg/client/clientset/versioned"
	buildinfo "github.com/knative/build/pkg/client/informers/externalversions"
	buildinfov1alpha1 "github.com/knative/build/pkg/client/informers/externalversions/build/v1alpha1"
	"github.com/sirupsen/logrus"
	"golang.org/x/time/rate"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/util/workqueue"

	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // support gcp users in .kube/config
)

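// options holds the command-line configuration for the build controller.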
type options struct {
	allContexts  bool
	buildCluster string
	config       string
	kubeconfig   string
	totURL       string

	// Create these values by following:
	//   https://github.com/kelseyhightower/grafeas-tutorial/blob/master/pki/gen-certs.sh
	cert       string
	privateKey string
}

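// parseOptions parses flags from os.Args and exits the program if they are invalid.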
func parseOptions() options {
	var o options
	if err := o.parse(flag.CommandLine, os.Args[1:]); err != nil {
		logrus.Fatalf("Invalid flags: %v", err)
	}
	return o
}

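// parse registers the controller's flags on the given FlagSet, parses args into o,
// and validates mutually dependent options.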
func (o *options) parse(flags *flag.FlagSet, args []string) error {
	flags.BoolVar(&o.allContexts, "all-contexts", false, "Monitor all cluster contexts, not just default")
	flags.StringVar(&o.totURL, "tot-url", "", "Tot URL")
	flags.StringVar(&o.kubeconfig, "kubeconfig", "", "Path to kubeconfig. Only required if out of cluster")
	flags.StringVar(&o.config, "config", "", "Path to prow config.yaml")
	flags.StringVar(&o.buildCluster, "build-cluster", "", "Path to file containing a YAML-marshalled kube.Cluster object. If empty, uses the local cluster.")
	flags.StringVar(&o.cert, "tls-cert-file", "", "Path to x509 certificate for HTTPS")
	flags.StringVar(&o.privateKey, "tls-private-key-file", "", "Path to matching x509 private key.")
	if err := flags.Parse(args); err != nil {
		return err
	}
	if (len(o.cert) == 0) != (len(o.privateKey) == 0) {
		return errors.New("both --tls-cert-file and --tls-private-key-file are required for HTTPS")
	}
	if o.kubeconfig != "" && o.buildCluster != "" {
		return errors.New("deprecated --build-cluster may not be used with --kubeconfig")
	}
	if o.buildCluster != "" {
		// TODO(fejta): change to warn and add a term date after plank migration
		logrus.Info("--build-cluster is deprecated, please switch to --kubeconfig")
	}
	return nil
}

// stopper returns a channel that remains open until an interrupt is received.
func stopper() chan struct{} {
	stop := make(chan struct{})
	c := make(chan os.Signal, 2)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-c
		logrus.Warn("Interrupt received, attempting clean shutdown...")
		close(stop)
		<-c
		logrus.Error("Second interrupt received, force exiting...")
		os.Exit(1)
	}()
	return stop
}

// contextConfigs returns a context => config mapping as well as the default context.
//
// It returns an error if the kubeconfig is specified but invalid, or if no contexts are found.
func contextConfigs(kubeconfig, buildCluster string) (map[string]rest.Config, string, error) {
	logrus.Info("Loading cluster contexts...")
	configs := map[string]rest.Config{}
	var defCtx *string
	// This will work if we are running inside Kubernetes
	if localCfg, err := rest.InClusterConfig(); err != nil {
		logrus.Warnf("Failed to create in-cluster config: %v", err)
	} else {
		defCtx = new(string)
		logrus.Info("* in-cluster")
		configs[*defCtx] = *localCfg
	}

	// Attempt to load external clusters too
	var loader clientcmd.ClientConfigLoader
	if kubeconfig != "" { // load from --kubeconfig
		loader = &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}
	} else {
		loader = clientcmd.NewDefaultClientConfigLoadingRules()
	}

	cfg, err := loader.Load()
	switch {
	case err != nil && kubeconfig != "":
		return nil, "", fmt.Errorf("load %s kubecfg: %v", kubeconfig, err)
	case err != nil:
		logrus.Warnf("failed to load any kubecfg files: %v", err)
	default:
		// Normally defCtx is the in-cluster context (""), but a developer running this on a
		// workstation has no in-cluster config (rest.InClusterConfig() fails above), so fall back
		// to the kubeconfig's current context as the default (which is where we look for prowjobs).
		if defCtx == nil && cfg.CurrentContext != "" {
			defCtx = &cfg.CurrentContext
		}

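		// Create a rest.Config for every context defined in the kubeconfig.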
		for context := range cfg.Contexts {
			logrus.Infof("* %s", context)
			contextCfg, err := clientcmd.NewNonInteractiveClientConfig(*cfg, context, &clientcmd.ConfigOverrides{}, loader).ClientConfig()
			if err != nil {
				return nil, "", fmt.Errorf("create %s client: %v", context, err)
			}
			configs[context] = *contextCfg
		}
	}

	if buildCluster != "" { // load from --build-cluster
		data, err := ioutil.ReadFile(buildCluster)
		if err != nil {
			return nil, "", fmt.Errorf("read build clusters: %v", err)
		}
		raw, err := kube.UnmarshalClusterMap(data)
		if err != nil {
			return nil, "", fmt.Errorf("unmarshal build clusters: %v", err)
		}
		cfg = &clientcmdapi.Config{
			Clusters:  map[string]*clientcmdapi.Cluster{},
			AuthInfos: map[string]*clientcmdapi.AuthInfo{},
			Contexts:  map[string]*clientcmdapi.Context{},
		}
		for alias, config := range raw {
			cfg.Clusters[alias] = &clientcmdapi.Cluster{
				Server:                   config.Endpoint,
				CertificateAuthorityData: config.ClusterCACertificate,
			}
			cfg.AuthInfos[alias] = &clientcmdapi.AuthInfo{
				ClientCertificateData: config.ClientCertificate,
				ClientKeyData:         config.ClientKey,
			}
			cfg.Contexts[alias] = &clientcmdapi.Context{
				Cluster:  alias,
				AuthInfo: alias,
				// TODO(fejta): Namespace?
			}
		}
		for context := range cfg.Contexts {
			logrus.Infof("* %s", context)
			contextCfg, err := clientcmd.NewNonInteractiveClientConfig(*cfg, context, &clientcmd.ConfigOverrides{}, nil).ClientConfig()
			if err != nil {
				return nil, "", fmt.Errorf("create %s client: %v", context, err)
			}
			configs[context] = *contextCfg
		}
	}

	if len(configs) == 0 {
		return nil, "", errors.New("no clients found")
	}
	if defCtx == nil {
		// Avoid dereferencing a nil pointer when contexts exist but no default was determined.
		return nil, "", errors.New("no default context found")
	}
	return configs, *defCtx, nil
}

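// buildConfig holds the knative-build client and informer for a single cluster context.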
type buildConfig struct {
	client   buildset.Interface
	informer buildinfov1alpha1.BuildInformer
}

// newBuildConfig returns a client and informer capable of mutating and monitoring the specified config.
func newBuildConfig(cfg rest.Config, stop chan struct{}) (*buildConfig, error) {
	bc, err := buildset.NewForConfig(&cfg)
	if err != nil {
		return nil, err
	}

	// Ensure the knative-build CRD is deployed
	// TODO(fejta): probably a better way to do this
	_, err = bc.Build().Builds("").List(metav1.ListOptions{Limit: 1})
	if err != nil {
		return nil, err
	}
	// Assume watches receive updates, but resync every 30m in case something wonky happens
	bif := buildinfo.NewSharedInformerFactory(bc, 30*time.Minute)
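	// Request the Builds lister so the shared factory registers that informer before Start runs.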
	bif.Build().V1alpha1().Builds().Lister()
	go bif.Start(stop)
	return &buildConfig{
		client:   bc,
		informer: bif.Build().V1alpha1().Builds(),
	}, nil
}

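// rateLimiter builds the controller's work queue: it takes the worst (max) of a per-item
// exponential backoff (5ms up to 2m) and an overall token bucket of 1000 QPS with a burst
// of 50000. The limiter type and controllerName are defined elsewhere in this package.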
func rateLimiter() limiter {
	rl := workqueue.NewMaxOfRateLimiter(
		workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 120*time.Second),
		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(1000), 50000)},
	)
	return workqueue.NewNamedRateLimitingQueue(rl, controllerName)
}

func main() {
	o := parseOptions()
	logrus.SetFormatter(logrusutil.NewDefaultFieldsFormatter(nil, logrus.Fields{"component": "build"}))

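	// Read the ProwJob namespace from the prow config, if one was provided.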
	pjNamespace := ""
	if o.config != "" {
		pc, err := config.Load(o.config, "") // ignore jobConfig
		if err != nil {
			logrus.WithError(err).Fatal("failed to load prow config")
		}
		pjNamespace = pc.ProwJobNamespace
	}

	configs, defaultContext, err := contextConfigs(o.kubeconfig, o.buildCluster)
	if err != nil {
		logrus.WithError(err).Fatal("Error building client configs")
	}

	if !o.allContexts { // Just the default context please
		logrus.Warnf("Truncating to a single cluster: %s", defaultContext)
		configs = map[string]rest.Config{defaultContext: configs[defaultContext]}
	}
	defaultConfig := configs[defaultContext]

	stop := stopper()

	kc, err := kubernetes.NewForConfig(&defaultConfig)
	if err != nil {
		logrus.WithError(err).Fatalf("Failed to create %s kubernetes client", defaultContext)
	}
	pjc, err := prowjobset.NewForConfig(&defaultConfig)
	if err != nil {
		logrus.WithError(err).Fatal("Failed to create prowjob client")
	}
	pjif := prowjobinfo.NewSharedInformerFactory(pjc, 30*time.Minute)
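	// Request the ProwJobs lister so the shared factory registers that informer before Start runs.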
	pjif.Prow().V1().ProwJobs().Lister()
	go pjif.Start(stop)

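	// Create a knative-build client and informer per cluster context, skipping any
	// context where the Build CRD is not deployed.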
	buildConfigs := map[string]buildConfig{}
	for context, cfg := range configs {
		var bc *buildConfig
		bc, err = newBuildConfig(cfg, stop)
		if apierrors.IsNotFound(err) {
			logrus.WithError(err).Warnf("Ignoring %s: knative build CRD not deployed", context)
			continue
		}
		if err != nil {
			logrus.WithError(err).Fatalf("Failed to create %s build client", context)
		}
		buildConfigs[context] = *bc
	}

	// TODO(fejta): move to its own binary
	if len(o.cert) > 0 {
		go runServer(o.cert, o.privateKey)
	}

	controller := newController(kc, pjc, pjif.Prow().V1().ProwJobs(), buildConfigs, o.totURL, pjNamespace, rateLimiter())
	if err := controller.Run(2, stop); err != nil {
		logrus.WithError(err).Fatal("Error running controller")
	}
	logrus.Info("Finished")
}