github.com/abayer/test-infra@v0.0.5/prow/cmd/plank/main.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"flag"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/labels"

	"k8s.io/test-infra/prow/config"
	"k8s.io/test-infra/prow/flagutil"
	"k8s.io/test-infra/prow/github"
	"k8s.io/test-infra/prow/kube"
	"k8s.io/test-infra/prow/logrusutil"
	"k8s.io/test-infra/prow/metrics"
	"k8s.io/test-infra/prow/plank"
)

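// options holds the command-line flags accepted by plank.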
type options struct {
	totURL string

	configPath    string
	jobConfigPath string
	cluster       string
	buildCluster  string
	selector      string

	githubEndpoint  flagutil.Strings
	githubTokenFile string
	dryRun          bool
	deckURL         string
}

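// gatherOptions parses the command-line flags into an options struct.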
func gatherOptions() options {
	o := options{
		githubEndpoint: flagutil.NewStrings("https://api.github.com"),
	}

	flag.StringVar(&o.totURL, "tot-url", "", "Tot URL")

	flag.StringVar(&o.configPath, "config-path", "/etc/config/config.yaml", "Path to config.yaml.")
	flag.StringVar(&o.jobConfigPath, "job-config-path", "", "Path to prow job configs.")
	flag.StringVar(&o.cluster, "cluster", "", "Path to kube.Cluster YAML file. If empty, uses the local cluster.")
	flag.StringVar(&o.buildCluster, "build-cluster", "", "Path to file containing a YAML-marshalled kube.Cluster object. If empty, uses the local cluster.")
	flag.StringVar(&o.selector, "label-selector", kube.EmptySelector, "Label selector to be applied to prowjobs. See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for constructing a label selector.")

	flag.Var(&o.githubEndpoint, "github-endpoint", "GitHub's API endpoint.")
	flag.StringVar(&o.githubTokenFile, "github-token-file", "/etc/github/oauth", "Path to the file containing the GitHub OAuth token.")
	flag.BoolVar(&o.dryRun, "dry-run", true, "Whether or not to make mutating API calls to GitHub.")
	flag.StringVar(&o.deckURL, "deck-url", "", "Deck URL for read-only access to the cluster.")
	flag.Parse()
	return o
}

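// Validate ensures the configured label selector parses.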
func (o *options) Validate() error {
	if _, err := labels.Parse(o.selector); err != nil {
		return fmt.Errorf("parse label selector: %v", err)
	}

	return nil
}

func main() {
	o := gatherOptions()
	if err := o.Validate(); err != nil {
		logrus.WithError(err).Fatal("Invalid options")
	}

	logrus.SetFormatter(
		logrusutil.NewDefaultFieldsFormatter(nil, logrus.Fields{"component": "plank"}),
	)

	configAgent := &config.Agent{}
	if err := configAgent.Start(o.configPath, o.jobConfigPath); err != nil {
		logrus.WithError(err).Fatal("Error starting config agent.")
	}

	var err error
	// Check that each GitHub endpoint is a valid URL.
	for _, ep := range o.githubEndpoint.Strings() {
		_, err = url.ParseRequestURI(ep)
		if err != nil {
			logrus.WithError(err).Fatalf("Invalid --github-endpoint URL %q.", ep)
		}
	}

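	// Construct a GitHub client when a token file is provided; in dry-run
	// mode a dry-run client is used so no mutating GitHub calls are made.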
	var ghc plank.GitHubClient
	if o.githubTokenFile != "" {
		secretAgent := &config.SecretAgent{}
		if err := secretAgent.Start([]string{o.githubTokenFile}); err != nil {
			logrus.WithError(err).Fatal("Error starting secrets agent.")
		}

		if string(secretAgent.GetTokenGenerator(o.githubTokenFile)()) != "" {
			if o.dryRun {
				ghc = github.NewDryRunClient(secretAgent.GetTokenGenerator(o.githubTokenFile),
					o.githubEndpoint.Strings()...)
			} else {
				ghc = github.NewClient(secretAgent.GetTokenGenerator(o.githubTokenFile),
					o.githubEndpoint.Strings()...)
			}
		}
	}

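	// Build the Kubernetes clients: kc serves the ProwJob namespace and pkcs
	// maps build-cluster aliases to clients for the pod namespace. Dry-run
	// mode uses a fake client backed by the Deck URL.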
	var kc *kube.Client
	var pkcs map[string]*kube.Client
	if o.dryRun {
		kc = kube.NewFakeClient(o.deckURL)
		pkcs = map[string]*kube.Client{kube.DefaultClusterAlias: kc}
	} else {
		if o.cluster == "" {
			kc, err = kube.NewClientInCluster(configAgent.Config().ProwJobNamespace)
			if err != nil {
				logrus.WithError(err).Fatal("Error getting kube client.")
			}
		} else {
			kc, err = kube.NewClientFromFile(o.cluster, configAgent.Config().ProwJobNamespace)
			if err != nil {
				logrus.WithError(err).Fatal("Error getting kube client.")
			}
		}
		if o.buildCluster == "" {
			pkc, err := kube.NewClientInCluster(configAgent.Config().PodNamespace)
			if err != nil {
				logrus.WithError(err).Fatal("Error getting kube client.")
			}
			pkcs = map[string]*kube.Client{kube.DefaultClusterAlias: pkc}
		} else {
			pkcs, err = kube.ClientMapFromFile(o.buildCluster, configAgent.Config().PodNamespace)
			if err != nil {
				logrus.WithError(err).Fatal("Error getting kube client to build cluster.")
			}
		}
	}

	c, err := plank.NewController(kc, pkcs, ghc, nil, configAgent, o.totURL, o.selector)
	if err != nil {
		logrus.WithError(err).Fatal("Error creating plank controller.")
	}

	// Push metrics to the configured Prometheus Pushgateway endpoint.
	pushGateway := configAgent.Config().PushGateway
	if pushGateway.Endpoint != "" {
		go metrics.PushMetrics("plank", pushGateway.Endpoint, pushGateway.Interval)
	}
	// Serve Prometheus metrics.
	go serve()
	// Gather metrics for the jobs handled by plank.
	go gather(c)

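	// Sync the controller every 30 seconds until an interrupt or SIGTERM is received.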
	tick := time.Tick(30 * time.Second)
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)

	for {
		select {
		case <-tick:
			start := time.Now()
			if err := c.Sync(); err != nil {
				logrus.WithError(err).Error("Error syncing.")
			}
			logrus.WithField("duration", fmt.Sprintf("%v", time.Since(start))).Info("Synced")
		case <-sig:
			logrus.Info("Plank is shutting down...")
			return
		}
	}
}

// serve starts an HTTP server and serves Prometheus metrics.
// Meant to be called inside a goroutine.
func serve() {
	http.Handle("/metrics", promhttp.Handler())
	logrus.WithError(http.ListenAndServe(":8080", nil)).Fatal("ListenAndServe returned.")
}

// gather metrics from plank.
// Meant to be called inside a goroutine.
func gather(c *plank.Controller) {
	tick := time.Tick(30 * time.Second)
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)

	for {
		select {
		case <-tick:
			start := time.Now()
			c.SyncMetrics()
			logrus.WithField("metrics-duration", fmt.Sprintf("%v", time.Since(start))).Debug("Metrics synced")
		case <-sig:
			logrus.Debug("Plank gatherer is shutting down...")
			return
		}
	}
}