github.com/pf-qiu/concourse/v6@v6.7.3-0.20201207032516-1f455d73275f/atc/atccmd/command.go

     1  package atccmd
     2  
     3  import (
     4  	"context"
     5  	"crypto/tls"
     6  	"crypto/x509"
     7  	"database/sql"
     8  	"errors"
     9  	"fmt"
    10  	"io/ioutil"
    11  	"net"
    12  	"net/http"
    13  	_ "net/http/pprof"
    14  	"net/url"
    15  	"os"
    16  	"strings"
    17  	"time"
    18  
    19  	"code.cloudfoundry.org/clock"
    20  	"code.cloudfoundry.org/lager"
    21  	"code.cloudfoundry.org/lager/lagerctx"
    22  	"github.com/pf-qiu/concourse/v6"
    23  	"github.com/pf-qiu/concourse/v6/atc"
    24  	"github.com/pf-qiu/concourse/v6/atc/api"
    25  	"github.com/pf-qiu/concourse/v6/atc/api/accessor"
    26  	"github.com/pf-qiu/concourse/v6/atc/api/auth"
    27  	"github.com/pf-qiu/concourse/v6/atc/api/buildserver"
    28  	"github.com/pf-qiu/concourse/v6/atc/api/containerserver"
    29  	"github.com/pf-qiu/concourse/v6/atc/api/pipelineserver"
    30  	"github.com/pf-qiu/concourse/v6/atc/api/policychecker"
    31  	"github.com/pf-qiu/concourse/v6/atc/auditor"
    32  	"github.com/pf-qiu/concourse/v6/atc/builds"
    33  	"github.com/pf-qiu/concourse/v6/atc/component"
    34  	"github.com/pf-qiu/concourse/v6/atc/compression"
    35  	"github.com/pf-qiu/concourse/v6/atc/creds"
    36  	"github.com/pf-qiu/concourse/v6/atc/creds/noop"
    37  	"github.com/pf-qiu/concourse/v6/atc/db"
    38  	"github.com/pf-qiu/concourse/v6/atc/db/encryption"
    39  	"github.com/pf-qiu/concourse/v6/atc/db/lock"
    40  	"github.com/pf-qiu/concourse/v6/atc/db/migration"
    41  	"github.com/pf-qiu/concourse/v6/atc/engine"
    42  	"github.com/pf-qiu/concourse/v6/atc/gc"
    43  	"github.com/pf-qiu/concourse/v6/atc/lidar"
    44  	"github.com/pf-qiu/concourse/v6/atc/metric"
    45  	"github.com/pf-qiu/concourse/v6/atc/policy"
    46  	"github.com/pf-qiu/concourse/v6/atc/resource"
    47  	"github.com/pf-qiu/concourse/v6/atc/scheduler"
    48  	"github.com/pf-qiu/concourse/v6/atc/scheduler/algorithm"
    49  	"github.com/pf-qiu/concourse/v6/atc/syslog"
    50  	"github.com/pf-qiu/concourse/v6/atc/worker"
    51  	"github.com/pf-qiu/concourse/v6/atc/worker/image"
    52  	"github.com/pf-qiu/concourse/v6/atc/wrappa"
    53  	"github.com/pf-qiu/concourse/v6/skymarshal/dexserver"
    54  	"github.com/pf-qiu/concourse/v6/skymarshal/legacyserver"
    55  	"github.com/pf-qiu/concourse/v6/skymarshal/skycmd"
    56  	"github.com/pf-qiu/concourse/v6/skymarshal/skyserver"
    57  	"github.com/pf-qiu/concourse/v6/skymarshal/storage"
    58  	"github.com/pf-qiu/concourse/v6/skymarshal/token"
    59  	"github.com/pf-qiu/concourse/v6/tracing"
    60  	"github.com/pf-qiu/concourse/v6/web"
    61  	"github.com/concourse/flag"
    62  	"github.com/concourse/retryhttp"
    63  	"gopkg.in/square/go-jose.v2/jwt"
    64  
    65  	"github.com/cppforlife/go-semi-semantic/version"
    66  	"github.com/hashicorp/go-multierror"
    67  	"github.com/jessevdk/go-flags"
    68  	gocache "github.com/patrickmn/go-cache"
    69  	"github.com/tedsuo/ifrit"
    70  	"github.com/tedsuo/ifrit/grouper"
    71  	"github.com/tedsuo/ifrit/http_server"
    72  	"github.com/tedsuo/ifrit/sigmon"
    73  	"golang.org/x/crypto/acme"
    74  	"golang.org/x/crypto/acme/autocert"
    75  	"golang.org/x/oauth2"
    76  	"golang.org/x/time/rate"
    77  	"gopkg.in/yaml.v2"
    78  
    79  	// dynamically registered metric emitters
    80  	_ "github.com/pf-qiu/concourse/v6/atc/metric/emitter"
    81  
    82  	// dynamically registered policy checkers
    83  	_ "github.com/pf-qiu/concourse/v6/atc/policy/opa"
    84  
    85  	// dynamically registered credential managers
    86  	_ "github.com/pf-qiu/concourse/v6/atc/creds/conjur"
    87  	_ "github.com/pf-qiu/concourse/v6/atc/creds/credhub"
    88  	_ "github.com/pf-qiu/concourse/v6/atc/creds/dummy"
    89  	_ "github.com/pf-qiu/concourse/v6/atc/creds/kubernetes"
    90  	_ "github.com/pf-qiu/concourse/v6/atc/creds/secretsmanager"
    91  	_ "github.com/pf-qiu/concourse/v6/atc/creds/ssm"
    92  	_ "github.com/pf-qiu/concourse/v6/atc/creds/vault"
    93  )
    94  
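// algorithmLimitRows and schedulerCache are handed to db.NewVersionsDB in
// backendComponents below: the former is the row limit for the scheduling
// algorithm's versions DB, the latter caches its lookups with a ten-second
// expiry.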
    95  const algorithmLimitRows = 100
    96  
    97  var schedulerCache = gocache.New(10*time.Second, 10*time.Second)
    98  
    99  var defaultDriverName = "postgres"
   100  var retryingDriverName = "too-many-connections-retrying"
   101  
   102  var flyClientID = "fly"
   103  var flyClientSecret = "Zmx5"
   104  
   105  var workerAvailabilityPollingInterval = 5 * time.Second
   106  var workerStatusPublishInterval = 1 * time.Minute
   107  
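// ATCCommand is the top-level flags struct for the ATC: `run` starts the web
// node and `migrate` operates on the database schema.
//
// A minimal parsing sketch with go-flags (hypothetical wiring; the real
// concourse binary embeds these commands into its own parser):
//
//	var cmd ATCCommand
//	parser := flags.NewParser(&cmd, flags.HelpFlag)
//	cmd.WireDynamicFlags(parser.Find("run"))
//	if _, err := parser.Parse(); err != nil {
//		os.Exit(1)
//	}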
   108  type ATCCommand struct {
   109  	RunCommand RunCommand `command:"run"`
   110  	Migration  Migration  `command:"migrate"`
   111  }
   112  
   113  type RunCommand struct {
   114  	Logger flag.Lager
   115  
   116  	varSourcePool creds.VarSourcePool
   117  
   118  	BindIP   flag.IP `long:"bind-ip"   default:"0.0.0.0" description:"IP address on which to listen for web traffic."`
   119  	BindPort uint16  `long:"bind-port" default:"8080"    description:"Port on which to listen for HTTP traffic."`
   120  
   121  	TLSBindPort uint16    `long:"tls-bind-port" description:"Port on which to listen for HTTPS traffic."`
   122  	TLSCert     flag.File `long:"tls-cert"      description:"File containing an SSL certificate."`
   123  	TLSKey      flag.File `long:"tls-key"       description:"File containing an RSA private key, used to encrypt HTTPS traffic."`
   124  
   125  	LetsEncrypt struct {
   126  		Enable  bool     `long:"enable-lets-encrypt"   description:"Automatically configure TLS certificates via Let's Encrypt/ACME."`
   127  		ACMEURL flag.URL `long:"lets-encrypt-acme-url" description:"URL of the ACME CA directory endpoint." default:"https://acme-v02.api.letsencrypt.org/directory"`
   128  	} `group:"Let's Encrypt Configuration"`
   129  
   130  	ExternalURL flag.URL `long:"external-url" description:"URL used to reach any ATC from the outside world."`
   131  
   132  	Postgres flag.PostgresConfig `group:"PostgreSQL Configuration" namespace:"postgres"`
   133  
   134  	ConcurrentRequestLimits   map[wrappa.LimitedRoute]int `long:"concurrent-request-limit" description:"Limit the number of concurrent requests to an API endpoint (Example: ListAllJobs:5)"`
   135  	APIMaxOpenConnections     int                         `long:"api-max-conns" description:"The maximum number of open connections for the api connection pool." default:"10"`
   136  	BackendMaxOpenConnections int                         `long:"backend-max-conns" description:"The maximum number of open connections for the backend connection pool." default:"50"`
   137  
   138  	CredentialManagement creds.CredentialManagementConfig `group:"Credential Management"`
   139  	CredentialManagers   creds.Managers
   140  
    141  	EncryptionKey    flag.Cipher `long:"encryption-key"     description:"A 16 or 32 byte key used to encrypt sensitive information before storing it in the database."`
    142  	OldEncryptionKey flag.Cipher `long:"old-encryption-key" description:"Encryption key previously used for encrypting sensitive information. If provided without a new key, data is decrypted. If provided with a new key, data is re-encrypted."`
   143  
   144  	DebugBindIP   flag.IP `long:"debug-bind-ip"   default:"127.0.0.1" description:"IP address on which to listen for the pprof debugger endpoints."`
   145  	DebugBindPort uint16  `long:"debug-bind-port" default:"8079"      description:"Port on which to listen for the pprof debugger endpoints."`
   146  
    147  	InterceptIdleTimeout time.Duration `long:"intercept-idle-timeout" default:"0m" description:"Length of time for an intercepted session to be idle before terminating."`
   148  
   149  	ComponentRunnerInterval time.Duration `long:"component-runner-interval" default:"10s" description:"Interval on which runners are kicked off for builds, locks, scans, and checks"`
   150  
   151  	LidarScannerInterval time.Duration `long:"lidar-scanner-interval" default:"10s" description:"Interval on which the resource scanner will run to see if new checks need to be scheduled"`
   152  
   153  	GlobalResourceCheckTimeout          time.Duration `long:"global-resource-check-timeout" default:"1h" description:"Time limit on checking for new versions of resources."`
   154  	ResourceCheckingInterval            time.Duration `long:"resource-checking-interval" default:"1m" description:"Interval on which to check for new versions of resources."`
    155  	ResourceWithWebhookCheckingInterval time.Duration `long:"resource-with-webhook-checking-interval" default:"1m" description:"Interval on which to check for new versions of resources that have a webhook defined."`
    156  	MaxChecksPerSecond                  int           `long:"max-checks-per-second" description:"Maximum number of checks that can be started per second. If not specified, this is calculated as (# of resources)/(resource checking interval). A value of -1 removes the limit."`
   157  
   158  	ContainerPlacementStrategyOptions worker.ContainerPlacementStrategyOptions `group:"Container Placement Strategy"`
   159  
   160  	BaggageclaimResponseHeaderTimeout time.Duration `long:"baggageclaim-response-header-timeout" default:"1m" description:"How long to wait for Baggageclaim to send the response header."`
   161  	StreamingArtifactsCompression     string        `long:"streaming-artifacts-compression" default:"gzip" choice:"gzip" choice:"zstd" description:"Compression algorithm for internal streaming."`
   162  
   163  	GardenRequestTimeout time.Duration `long:"garden-request-timeout" default:"5m" description:"How long to wait for requests to Garden to complete. 0 means no timeout."`
   164  
   165  	CLIArtifactsDir flag.Dir `long:"cli-artifacts-dir" description:"Directory containing downloadable CLI binaries."`
   166  
   167  	Metrics struct {
   168  		HostName            string            `long:"metrics-host-name" description:"Host string to attach to emitted metrics."`
   169  		Attributes          map[string]string `long:"metrics-attribute" description:"A key-value attribute to attach to emitted metrics. Can be specified multiple times." value-name:"NAME:VALUE"`
   170  		BufferSize          uint32            `long:"metrics-buffer-size" default:"1000" description:"The size of the buffer used in emitting event metrics."`
   171  		CaptureErrorMetrics bool              `long:"capture-error-metrics" description:"Enable capturing of error log metrics"`
   172  	} `group:"Metrics & Diagnostics"`
   173  
   174  	Tracing tracing.Config `group:"Tracing" namespace:"tracing"`
   175  
   176  	PolicyCheckers struct {
   177  		Filter policy.Filter
   178  	} `group:"Policy Checking"`
   179  
   180  	Server struct {
   181  		XFrameOptions string `long:"x-frame-options" default:"deny" description:"The value to set for X-Frame-Options."`
   182  		ClusterName   string `long:"cluster-name" description:"A name for this Concourse cluster, to be displayed on the dashboard page."`
   183  		ClientID      string `long:"client-id" default:"concourse-web" description:"Client ID to use for login flow"`
   184  		ClientSecret  string `long:"client-secret" required:"true" description:"Client secret to use for login flow"`
   185  	} `group:"Web Server"`
   186  
   187  	LogDBQueries   bool `long:"log-db-queries" description:"Log database queries."`
   188  	LogClusterName bool `long:"log-cluster-name" description:"Log cluster name."`
   189  
   190  	GC struct {
   191  		Interval time.Duration `long:"interval" default:"30s" description:"Interval on which to perform garbage collection."`
   192  
   193  		OneOffBuildGracePeriod time.Duration `long:"one-off-grace-period" default:"5m" description:"Period after which one-off build containers will be garbage-collected."`
   194  		MissingGracePeriod     time.Duration `long:"missing-grace-period" default:"5m" description:"Period after which to reap containers and volumes that were created but went missing from the worker."`
    195  		HijackGracePeriod      time.Duration `long:"hijack-grace-period" default:"5m" description:"Period after which hijacked containers will be garbage collected."`
    196  		FailedGracePeriod      time.Duration `long:"failed-grace-period" default:"120h" description:"Period after which failed containers will be garbage collected."`
   197  		CheckRecyclePeriod     time.Duration `long:"check-recycle-period" default:"1m" description:"Period after which to reap checks that are completed."`
   198  	} `group:"Garbage Collection" namespace:"gc"`
   199  
   200  	BuildTrackerInterval time.Duration `long:"build-tracker-interval" default:"10s" description:"Interval on which to run build tracking."`
   201  
   202  	TelemetryOptIn bool `long:"telemetry-opt-in" hidden:"true" description:"Enable anonymous concourse version reporting."`
   203  
   204  	DefaultBuildLogsToRetain uint64 `long:"default-build-logs-to-retain" description:"Default build logs to retain, 0 means all"`
   205  	MaxBuildLogsToRetain     uint64 `long:"max-build-logs-to-retain" description:"Maximum build logs to retain, 0 means not specified. Will override values configured in jobs"`
   206  
   207  	DefaultDaysToRetainBuildLogs uint64 `long:"default-days-to-retain-build-logs" description:"Default days to retain build logs. 0 means unlimited"`
   208  	MaxDaysToRetainBuildLogs     uint64 `long:"max-days-to-retain-build-logs" description:"Maximum days to retain build logs, 0 means not specified. Will override values configured in jobs"`
   209  
    210  	JobSchedulingMaxInFlight uint64 `long:"job-scheduling-max-in-flight" default:"32" description:"Maximum number of jobs to schedule at the same time"`
   211  
   212  	DefaultCpuLimit    *int    `long:"default-task-cpu-limit" description:"Default max number of cpu shares per task, 0 means unlimited"`
   213  	DefaultMemoryLimit *string `long:"default-task-memory-limit" description:"Default maximum memory per task, 0 means unlimited"`
   214  
   215  	Auditor struct {
   216  		EnableBuildAuditLog     bool `long:"enable-build-auditing" description:"Enable auditing for all api requests connected to builds."`
   217  		EnableContainerAuditLog bool `long:"enable-container-auditing" description:"Enable auditing for all api requests connected to containers."`
   218  		EnableJobAuditLog       bool `long:"enable-job-auditing" description:"Enable auditing for all api requests connected to jobs."`
   219  		EnablePipelineAuditLog  bool `long:"enable-pipeline-auditing" description:"Enable auditing for all api requests connected to pipelines."`
   220  		EnableResourceAuditLog  bool `long:"enable-resource-auditing" description:"Enable auditing for all api requests connected to resources."`
   221  		EnableSystemAuditLog    bool `long:"enable-system-auditing" description:"Enable auditing for all api requests connected to system transactions."`
   222  		EnableTeamAuditLog      bool `long:"enable-team-auditing" description:"Enable auditing for all api requests connected to teams."`
   223  		EnableWorkerAuditLog    bool `long:"enable-worker-auditing" description:"Enable auditing for all api requests connected to workers."`
   224  		EnableVolumeAuditLog    bool `long:"enable-volume-auditing" description:"Enable auditing for all api requests connected to volumes."`
   225  	}
   226  
   227  	Syslog struct {
   228  		Hostname      string        `long:"syslog-hostname" description:"Client hostname with which the build logs will be sent to the syslog server." default:"atc-syslog-drainer"`
   229  		Address       string        `long:"syslog-address" description:"Remote syslog server address with port (Example: 0.0.0.0:514)."`
   230  		Transport     string        `long:"syslog-transport" description:"Transport protocol for syslog messages (Currently supporting tcp, udp & tls)."`
    231  		DrainInterval time.Duration `long:"syslog-drain-interval" description:"Interval on which to check for new build logs to send to the syslog server (duration units are s/m/h; e.g. 30s/30m/1h)" default:"30s"`
   232  		CACerts       []string      `long:"syslog-ca-cert"              description:"Paths to PEM-encoded CA cert files to use to verify the Syslog server SSL cert."`
    233  	} `group:"Syslog Drainer Configuration"`
   234  
   235  	Auth struct {
   236  		AuthFlags     skycmd.AuthFlags
   237  		MainTeamFlags skycmd.AuthTeamFlags `group:"Authentication (Main Team)" namespace:"main-team"`
   238  	} `group:"Authentication"`
   239  
   240  	ConfigRBAC flag.File `long:"config-rbac" description:"Customize RBAC role-action mapping."`
   241  
   242  	SystemClaimKey    string   `long:"system-claim-key" default:"aud" description:"The token claim key to use when matching system-claim-values"`
   243  	SystemClaimValues []string `long:"system-claim-value" default:"concourse-worker" description:"Configure which token requests should be considered 'system' requests."`
   244  
   245  	FeatureFlags struct {
   246  		EnableGlobalResources                bool `long:"enable-global-resources" description:"Enable equivalent resources across pipelines and teams to share a single version history."`
   247  		EnableRedactSecrets                  bool `long:"enable-redact-secrets" description:"Enable redacting secrets in build logs."`
    248  		EnableBuildRerunWhenWorkerDisappears bool `long:"enable-rerun-when-worker-disappears" description:"Enable automatic build rerun when a worker disappears or a network error occurs"`
   249  		EnableAcrossStep                     bool `long:"enable-across-step" description:"Enable the experimental across step to be used in jobs. The API is subject to change."`
   250  		EnablePipelineInstances              bool `long:"enable-pipeline-instances" description:"Enable pipeline instances"`
   251  		EnableP2PVolumeStreaming             bool `long:"enable-p2p-volume-streaming" description:"Enable P2P volume streaming"`
   252  	} `group:"Feature Flags"`
   253  
   254  	BaseResourceTypeDefaults flag.File `long:"base-resource-type-defaults" description:"Base resource type defaults"`
   255  
   256  	P2pVolumeStreamingTimeout time.Duration `long:"p2p-volume-streaming-timeout" description:"Timeout value of p2p volume streaming" default:"15m"`
   257  }
   258  
   259  type Migration struct {
   260  	Postgres           flag.PostgresConfig `group:"PostgreSQL Configuration" namespace:"postgres"`
    261  	EncryptionKey      flag.Cipher         `long:"encryption-key"     description:"A 16 or 32 byte key used to encrypt sensitive information before storing it in the database."`
   262  	OldEncryptionKey   flag.Cipher         `long:"old-encryption-key" description:"Encryption key previously used for encrypting sensitive information. If provided without a new key, data is decrypted. If provided with a new key, data is re-encrypted."`
   263  	CurrentDBVersion   bool                `long:"current-db-version" description:"Print the current database version and exit"`
   264  	SupportedDBVersion bool                `long:"supported-db-version" description:"Print the max supported database version and exit"`
   265  	MigrateDBToVersion int                 `long:"migrate-db-to-version" description:"Migrate to the specified database version and exit"`
   266  }
   267  
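// Execute dispatches to exactly one migration action, checked in a fixed
// order: --current-db-version, --supported-db-version, --migrate-db-to-version,
// then encryption key rotation when --old-encryption-key is set.
//
// A typical invocation (sketch; flag spelling as defined above, binary name
// may differ):
//
//	concourse migrate --migrate-db-to-version=<schema version> --postgres-host=...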
   268  func (m *Migration) Execute(args []string) error {
   269  	if m.CurrentDBVersion {
   270  		return m.currentDBVersion()
   271  	}
   272  	if m.SupportedDBVersion {
   273  		return m.supportedDBVersion()
   274  	}
   275  	if m.MigrateDBToVersion > 0 {
   276  		return m.migrateDBToVersion()
   277  	}
   278  	if m.OldEncryptionKey.AEAD != nil {
   279  		return m.rotateEncryptionKey()
   280  	}
   281  	return errors.New("must specify one of `--current-db-version`, `--supported-db-version`, `--migrate-db-to-version`, or `--old-encryption-key`")
   282  
   283  }
   284  
   285  func (cmd *Migration) currentDBVersion() error {
   286  	helper := migration.NewOpenHelper(
   287  		defaultDriverName,
   288  		cmd.Postgres.ConnectionString(),
   289  		nil,
   290  		nil,
   291  		nil,
   292  	)
   293  
   294  	version, err := helper.CurrentVersion()
   295  	if err != nil {
   296  		return err
   297  	}
   298  
   299  	fmt.Println(version)
   300  	return nil
   301  }
   302  
   303  func (cmd *Migration) supportedDBVersion() error {
   304  	helper := migration.NewOpenHelper(
   305  		defaultDriverName,
   306  		cmd.Postgres.ConnectionString(),
   307  		nil,
   308  		nil,
   309  		nil,
   310  	)
   311  
   312  	version, err := helper.SupportedVersion()
   313  	if err != nil {
   314  		return err
   315  	}
   316  
   317  	fmt.Println(version)
   318  	return nil
   319  }
   320  
   321  func (cmd *Migration) migrateDBToVersion() error {
   322  	version := cmd.MigrateDBToVersion
   323  
   324  	var newKey *encryption.Key
   325  	var oldKey *encryption.Key
   326  
   327  	if cmd.EncryptionKey.AEAD != nil {
   328  		newKey = encryption.NewKey(cmd.EncryptionKey.AEAD)
   329  	}
   330  	if cmd.OldEncryptionKey.AEAD != nil {
   331  		oldKey = encryption.NewKey(cmd.OldEncryptionKey.AEAD)
   332  	}
   333  
   334  	helper := migration.NewOpenHelper(
   335  		defaultDriverName,
   336  		cmd.Postgres.ConnectionString(),
   337  		nil,
   338  		newKey,
   339  		oldKey,
   340  	)
   341  
   342  	err := helper.MigrateToVersion(version)
   343  	if err != nil {
   344  		return fmt.Errorf("Could not migrate to version: %d Reason: %s", version, err.Error())
   345  	}
   346  
   347  	fmt.Println("Successfully migrated to version:", version)
   348  	return nil
   349  }
   350  
   351  func (cmd *Migration) rotateEncryptionKey() error {
   352  	var newKey *encryption.Key
   353  	var oldKey *encryption.Key
   354  
   355  	if cmd.EncryptionKey.AEAD != nil {
   356  		newKey = encryption.NewKey(cmd.EncryptionKey.AEAD)
   357  	}
   358  	if cmd.OldEncryptionKey.AEAD != nil {
   359  		oldKey = encryption.NewKey(cmd.OldEncryptionKey.AEAD)
   360  	}
   361  
   362  	helper := migration.NewOpenHelper(
   363  		defaultDriverName,
   364  		cmd.Postgres.ConnectionString(),
   365  		nil,
   366  		newKey,
   367  		oldKey,
   368  	)
   369  
   370  	version, err := helper.CurrentVersion()
   371  	if err != nil {
   372  		return err
   373  	}
   374  
   375  	return helper.MigrateToVersion(version)
   376  }
   377  
   378  func (cmd *ATCCommand) WireDynamicFlags(commandFlags *flags.Command) {
   379  	cmd.RunCommand.WireDynamicFlags(commandFlags)
   380  }
   381  
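// WireDynamicFlags walks the parsed flag groups (including nested groups) and
// registers the dynamically loaded extension points against them: credential
// manager configs, metric emitters, policy checkers, and the skycmd auth
// connectors. The first three are provided by the blank imports at the top of
// this file.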
   382  func (cmd *RunCommand) WireDynamicFlags(commandFlags *flags.Command) {
   383  	var (
   384  		metricsGroup      *flags.Group
   385  		policyChecksGroup *flags.Group
   386  		credsGroup        *flags.Group
   387  		authGroup         *flags.Group
   388  	)
   389  
   390  	groups := commandFlags.Groups()
   391  	for i := 0; i < len(groups); i++ {
   392  		group := groups[i]
   393  
   394  		if credsGroup == nil && group.ShortDescription == "Credential Management" {
   395  			credsGroup = group
   396  		}
   397  
   398  		if metricsGroup == nil && group.ShortDescription == "Metrics & Diagnostics" {
   399  			metricsGroup = group
   400  		}
   401  
   402  		if policyChecksGroup == nil && group.ShortDescription == "Policy Checking" {
   403  			policyChecksGroup = group
   404  		}
   405  
   406  		if authGroup == nil && group.ShortDescription == "Authentication" {
   407  			authGroup = group
   408  		}
   409  
   410  		if metricsGroup != nil && credsGroup != nil && authGroup != nil && policyChecksGroup != nil {
   411  			break
   412  		}
   413  
   414  		groups = append(groups, group.Groups()...)
   415  	}
   416  
   417  	if metricsGroup == nil {
   418  		panic("could not find Metrics & Diagnostics group for registering emitters")
   419  	}
   420  
   421  	if policyChecksGroup == nil {
   422  		panic("could not find Policy Checking group for registering policy checkers")
   423  	}
   424  
   425  	if credsGroup == nil {
   426  		panic("could not find Credential Management group for registering managers")
   427  	}
   428  
   429  	if authGroup == nil {
   430  		panic("could not find Authentication group for registering connectors")
   431  	}
   432  
   433  	managerConfigs := make(creds.Managers)
   434  	for name, p := range creds.ManagerFactories() {
   435  		managerConfigs[name] = p.AddConfig(credsGroup)
   436  	}
   437  	cmd.CredentialManagers = managerConfigs
   438  
   439  	metric.Metrics.WireEmitters(metricsGroup)
   440  
   441  	policy.WireCheckers(policyChecksGroup)
   442  
   443  	skycmd.WireConnectors(authGroup)
   444  	skycmd.WireTeamConnectors(authGroup.Find("Authentication (Main Team)"))
   445  }
   446  
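// Execute builds the composed ifrit runner and blocks until it exits. sigmon
// relays termination signals to the runner so its members can shut down
// cleanly.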
   447  func (cmd *RunCommand) Execute(args []string) error {
   448  	runner, err := cmd.Runner(args)
   449  	if err != nil {
   450  		return err
   451  	}
   452  
   453  	return <-ifrit.Invoke(sigmon.New(runner)).Wait()
   454  }
   455  
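// Runner validates the configuration, applies the feature flags, registers the
// retrying Postgres driver, opens the lock/api/backend/gc/worker connection
// pools, and returns a grouper-based runner that closes them all on exit.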
   456  func (cmd *RunCommand) Runner(positionalArguments []string) (ifrit.Runner, error) {
   457  	if cmd.ExternalURL.URL == nil {
   458  		cmd.ExternalURL = cmd.DefaultURL()
   459  	}
   460  
   461  	if len(positionalArguments) != 0 {
   462  		return nil, fmt.Errorf("unexpected positional arguments: %v", positionalArguments)
   463  	}
   464  
   465  	err := cmd.validate()
   466  	if err != nil {
   467  		return nil, err
   468  	}
   469  
   470  	logger, reconfigurableSink := cmd.Logger.Logger("atc")
   471  	if cmd.LogClusterName {
   472  		logger = logger.WithData(lager.Data{
   473  			"cluster": cmd.Server.ClusterName,
   474  		})
   475  	}
   476  
   477  	commandSession := logger.Session("cmd")
   478  	startTime := time.Now()
   479  
   480  	commandSession.Info("start")
   481  	defer commandSession.Info("finish", lager.Data{
    482  		"duration": time.Since(startTime),
   483  	})
   484  
   485  	atc.EnableGlobalResources = cmd.FeatureFlags.EnableGlobalResources
   486  	atc.EnableRedactSecrets = cmd.FeatureFlags.EnableRedactSecrets
   487  	atc.EnableBuildRerunWhenWorkerDisappears = cmd.FeatureFlags.EnableBuildRerunWhenWorkerDisappears
   488  	atc.EnableAcrossStep = cmd.FeatureFlags.EnableAcrossStep
   489  	atc.EnablePipelineInstances = cmd.FeatureFlags.EnablePipelineInstances
   490  
   491  	if cmd.BaseResourceTypeDefaults.Path() != "" {
   492  		content, err := ioutil.ReadFile(cmd.BaseResourceTypeDefaults.Path())
   493  		if err != nil {
   494  			return nil, err
   495  		}
   496  
   497  		defaults := map[string]atc.Source{}
   498  		err = yaml.Unmarshal(content, &defaults)
   499  		if err != nil {
   500  			return nil, err
   501  		}
   502  
   503  		atc.LoadBaseResourceTypeDefaults(defaults)
   504  	}
   505  
    506  	//FIXME: These only need to run once for the entire binary. At the moment,
    507  	//they rely on the state of the command.
   508  	db.SetupConnectionRetryingDriver(
   509  		"postgres",
   510  		cmd.Postgres.ConnectionString(),
   511  		retryingDriverName,
   512  	)
   513  
   514  	// Register the sink that collects error metrics
   515  	if cmd.Metrics.CaptureErrorMetrics {
   516  		errorSinkCollector := metric.NewErrorSinkCollector(
   517  			logger,
   518  			metric.Metrics,
   519  		)
   520  		logger.RegisterSink(&errorSinkCollector)
   521  	}
   522  
   523  	err = cmd.Tracing.Prepare()
   524  	if err != nil {
   525  		return nil, err
   526  	}
   527  
   528  	http.HandleFunc("/debug/connections", func(w http.ResponseWriter, r *http.Request) {
   529  		for _, stack := range db.GlobalConnectionTracker.Current() {
   530  			fmt.Fprintln(w, stack)
   531  		}
   532  	})
   533  
   534  	if err := cmd.configureMetrics(logger); err != nil {
   535  		return nil, err
   536  	}
   537  
   538  	lockConn, err := cmd.constructLockConn(retryingDriverName)
   539  	if err != nil {
   540  		return nil, err
   541  	}
   542  
   543  	lockFactory := lock.NewLockFactory(lockConn, metric.LogLockAcquired, metric.LogLockReleased)
   544  
   545  	apiConn, err := cmd.constructDBConn(retryingDriverName, logger, cmd.APIMaxOpenConnections, cmd.APIMaxOpenConnections/2, "api", lockFactory)
   546  	if err != nil {
   547  		return nil, err
   548  	}
   549  
   550  	backendConn, err := cmd.constructDBConn(retryingDriverName, logger, cmd.BackendMaxOpenConnections, cmd.BackendMaxOpenConnections/2, "backend", lockFactory)
   551  	if err != nil {
   552  		return nil, err
   553  	}
   554  
   555  	gcConn, err := cmd.constructDBConn(retryingDriverName, logger, 5, 2, "gc", lockFactory)
   556  	if err != nil {
   557  		return nil, err
   558  	}
   559  
   560  	workerConn, err := cmd.constructDBConn(retryingDriverName, logger, 1, 1, "worker", lockFactory)
   561  	if err != nil {
   562  		return nil, err
   563  	}
   564  
   565  	storage, err := storage.NewPostgresStorage(logger, cmd.Postgres)
   566  	if err != nil {
   567  		return nil, err
   568  	}
   569  
   570  	secretManager, err := cmd.secretManager(logger)
   571  	if err != nil {
   572  		return nil, err
   573  	}
   574  
   575  	cmd.varSourcePool = creds.NewVarSourcePool(
   576  		logger.Session("var-source-pool"),
   577  		cmd.CredentialManagement,
   578  		5*time.Minute,
   579  		1*time.Minute,
   580  		clock.NewClock(),
   581  	)
   582  
   583  	members, err := cmd.constructMembers(logger, reconfigurableSink, apiConn, workerConn, backendConn, gcConn, storage, lockFactory, secretManager)
   584  	if err != nil {
   585  		return nil, err
   586  	}
   587  
   588  	members = append(members, grouper.Member{
   589  		Name: "periodic-metrics",
   590  		Runner: metric.PeriodicallyEmit(
   591  			logger.Session("periodic-metrics"),
   592  			metric.Metrics,
   593  			10*time.Second,
   594  		),
   595  	})
   596  
   597  	onReady := func() {
   598  		logData := lager.Data{
   599  			"http":  cmd.nonTLSBindAddr(),
   600  			"debug": cmd.debugBindAddr(),
   601  		}
   602  
   603  		if cmd.isTLSEnabled() {
   604  			logData["https"] = cmd.tlsBindAddr()
   605  		}
   606  
   607  		logger.Info("listening", logData)
   608  	}
   609  
   610  	onExit := func() {
   611  		for _, closer := range []Closer{lockConn, apiConn, backendConn, gcConn, storage, workerConn} {
   612  			closer.Close()
   613  		}
   614  
   615  		cmd.varSourcePool.Close()
   616  	}
   617  
   618  	return run(grouper.NewParallel(os.Interrupt, members), onReady, onExit), nil
   619  }
   620  
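// constructMembers combines the API members with one component.Runner per
// backend and GC component; components implementing component.Drainable also
// get a companion "-drainer" member.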
   621  func (cmd *RunCommand) constructMembers(
   622  	logger lager.Logger,
   623  	reconfigurableSink *lager.ReconfigurableSink,
   624  	apiConn db.Conn,
   625  	workerConn db.Conn,
   626  	backendConn db.Conn,
   627  	gcConn db.Conn,
   628  	storage storage.Storage,
   629  	lockFactory lock.LockFactory,
   630  	secretManager creds.Secrets,
   631  ) ([]grouper.Member, error) {
   632  	if cmd.TelemetryOptIn {
   633  		url := fmt.Sprintf("http://telemetry.concourse-ci.org/?version=%s", concourse.Version)
   634  		go func() {
   635  			_, err := http.Get(url)
   636  			if err != nil {
   637  				logger.Error("telemetry-version", err)
   638  			}
   639  		}()
   640  	}
   641  
   642  	policyChecker, err := policy.Initialize(logger, cmd.Server.ClusterName, concourse.Version, cmd.PolicyCheckers.Filter)
   643  	if err != nil {
   644  		return nil, err
   645  	}
   646  
   647  	apiMembers, err := cmd.constructAPIMembers(logger, reconfigurableSink, apiConn, workerConn, storage, lockFactory, secretManager, policyChecker)
   648  	if err != nil {
   649  		return nil, err
   650  	}
   651  
   652  	backendComponents, err := cmd.backendComponents(logger, backendConn, lockFactory, secretManager, policyChecker)
   653  	if err != nil {
   654  		return nil, err
   655  	}
   656  
   657  	gcComponents, err := cmd.gcComponents(logger, gcConn, lockFactory)
   658  	if err != nil {
   659  		return nil, err
   660  	}
   661  
    662  	// use backendConn so that the Component objects created by the factory use
    663  	// the backend connection pool when reloading.
   664  	componentFactory := db.NewComponentFactory(backendConn)
   665  	bus := backendConn.Bus()
   666  
   667  	members := apiMembers
   668  	components := append(backendComponents, gcComponents...)
   669  	for _, c := range components {
   670  		dbComponent, err := componentFactory.CreateOrUpdate(c.Component)
   671  		if err != nil {
   672  			return nil, err
   673  		}
   674  
   675  		componentLogger := logger.Session(c.Component.Name)
   676  
   677  		members = append(members, grouper.Member{
   678  			Name: c.Component.Name,
   679  			Runner: &component.Runner{
   680  				Logger:    componentLogger,
   681  				Interval:  cmd.ComponentRunnerInterval,
   682  				Component: dbComponent,
   683  				Bus:       bus,
   684  				Schedulable: &component.Coordinator{
   685  					Locker:    lockFactory,
   686  					Component: dbComponent,
   687  					Runnable:  c.Runnable,
   688  				},
   689  			},
   690  		})
   691  
   692  		if drainable, ok := c.Runnable.(component.Drainable); ok {
   693  			members = append(members, grouper.Member{
   694  				Name: c.Component.Name + "-drainer",
   695  				Runner: drainRunner{
   696  					logger:  componentLogger.Session("drain"),
   697  					drainer: drainable,
   698  				},
   699  			})
   700  		}
   701  	}
   702  
   703  	return members, nil
   704  }
   705  
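// constructAPIMembers wires up the HTTP-facing members: the debug (pprof)
// server, the plain HTTP server, and, when TLS is configured, an HTTPS server
// whose plain-HTTP counterpart redirects web, auth, login, and legacy traffic
// (but intentionally not the API) to https.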
   706  func (cmd *RunCommand) constructAPIMembers(
   707  	logger lager.Logger,
   708  	reconfigurableSink *lager.ReconfigurableSink,
   709  	dbConn db.Conn,
   710  	workerConn db.Conn,
   711  	storage storage.Storage,
   712  	lockFactory lock.LockFactory,
   713  	secretManager creds.Secrets,
   714  	policyChecker policy.Checker,
   715  ) ([]grouper.Member, error) {
   716  
   717  	httpClient, err := cmd.skyHttpClient()
   718  	if err != nil {
   719  		return nil, err
   720  	}
   721  
   722  	teamFactory := db.NewTeamFactory(dbConn, lockFactory)
   723  	workerTeamFactory := db.NewTeamFactory(workerConn, lockFactory)
   724  
   725  	_, err = teamFactory.CreateDefaultTeamIfNotExists()
   726  	if err != nil {
   727  		return nil, err
   728  	}
   729  
   730  	err = cmd.configureAuthForDefaultTeam(teamFactory)
   731  	if err != nil {
   732  		return nil, err
   733  	}
   734  
   735  	userFactory := db.NewUserFactory(dbConn)
   736  
   737  	dbResourceCacheFactory := db.NewResourceCacheFactory(dbConn, lockFactory)
   738  	fetchSourceFactory := worker.NewFetchSourceFactory(dbResourceCacheFactory)
   739  	resourceFetcher := worker.NewFetcher(clock.NewClock(), lockFactory, fetchSourceFactory)
   740  	dbResourceConfigFactory := db.NewResourceConfigFactory(dbConn, lockFactory)
   741  
   742  	dbWorkerBaseResourceTypeFactory := db.NewWorkerBaseResourceTypeFactory(dbConn)
   743  	dbWorkerTaskCacheFactory := db.NewWorkerTaskCacheFactory(dbConn)
   744  	dbTaskCacheFactory := db.NewTaskCacheFactory(dbConn)
   745  	dbVolumeRepository := db.NewVolumeRepository(dbConn)
   746  	dbWorkerFactory := db.NewWorkerFactory(workerConn)
   747  	workerVersion, err := workerVersion()
   748  	if err != nil {
   749  		return nil, err
   750  	}
   751  
    752  	// XXX(substeps): why is this unconditional?
    753  	// A: we're constructing API components and none of them use the streaming
    754  	// funcs, which rely on a compression method.
   755  	compressionLib := compression.NewGzipCompression()
   756  	workerProvider := worker.NewDBWorkerProvider(
   757  		lockFactory,
   758  		retryhttp.NewExponentialBackOffFactory(5*time.Minute),
   759  		resourceFetcher,
   760  		image.NewImageFactory(),
   761  		dbResourceCacheFactory,
   762  		dbResourceConfigFactory,
   763  		dbWorkerBaseResourceTypeFactory,
   764  		dbTaskCacheFactory,
   765  		dbWorkerTaskCacheFactory,
   766  		dbVolumeRepository,
   767  		teamFactory,
   768  		dbWorkerFactory,
   769  		workerVersion,
   770  		cmd.BaggageclaimResponseHeaderTimeout,
   771  		cmd.GardenRequestTimeout,
   772  	)
   773  
   774  	pool := worker.NewPool(workerProvider)
   775  	workerClient := worker.NewClient(pool, workerProvider, compressionLib, workerAvailabilityPollingInterval, workerStatusPublishInterval, cmd.FeatureFlags.EnableP2PVolumeStreaming, cmd.P2pVolumeStreamingTimeout)
   776  
   777  	credsManagers := cmd.CredentialManagers
   778  	dbPipelineFactory := db.NewPipelineFactory(dbConn, lockFactory)
   779  	dbJobFactory := db.NewJobFactory(dbConn, lockFactory)
   780  	dbResourceFactory := db.NewResourceFactory(dbConn, lockFactory)
   781  	dbContainerRepository := db.NewContainerRepository(dbConn)
   782  	gcContainerDestroyer := gc.NewDestroyer(logger, dbContainerRepository, dbVolumeRepository)
   783  	dbBuildFactory := db.NewBuildFactory(dbConn, lockFactory, cmd.GC.OneOffBuildGracePeriod, cmd.GC.FailedGracePeriod)
   784  	dbCheckFactory := db.NewCheckFactory(dbConn, lockFactory, secretManager, cmd.varSourcePool, db.CheckDurations{
   785  		Interval:            cmd.ResourceCheckingInterval,
   786  		IntervalWithWebhook: cmd.ResourceWithWebhookCheckingInterval,
   787  		Timeout:             cmd.GlobalResourceCheckTimeout,
   788  	})
   789  	dbAccessTokenFactory := db.NewAccessTokenFactory(dbConn)
   790  	dbClock := db.NewClock()
   791  	dbWall := db.NewWall(dbConn, &dbClock)
   792  
   793  	tokenVerifier := cmd.constructTokenVerifier(dbAccessTokenFactory)
   794  
   795  	teamsCacher := accessor.NewTeamsCacher(
   796  		logger,
   797  		dbConn.Bus(),
   798  		teamFactory,
   799  		time.Minute,
   800  		time.Minute,
   801  	)
   802  
   803  	accessFactory := accessor.NewAccessFactory(
   804  		tokenVerifier,
   805  		teamsCacher,
   806  		cmd.SystemClaimKey,
   807  		cmd.SystemClaimValues,
   808  	)
   809  
   810  	middleware := token.NewMiddleware(cmd.Auth.AuthFlags.SecureCookies)
   811  
   812  	apiHandler, err := cmd.constructAPIHandler(
   813  		logger,
   814  		reconfigurableSink,
   815  		teamFactory,
   816  		workerTeamFactory,
   817  		dbPipelineFactory,
   818  		dbJobFactory,
   819  		dbResourceFactory,
   820  		dbWorkerFactory,
   821  		dbVolumeRepository,
   822  		dbContainerRepository,
   823  		gcContainerDestroyer,
   824  		dbBuildFactory,
   825  		dbCheckFactory,
   826  		dbResourceConfigFactory,
   827  		userFactory,
   828  		workerClient,
   829  		secretManager,
   830  		credsManagers,
   831  		accessFactory,
   832  		dbWall,
   833  		policyChecker,
   834  	)
   835  	if err != nil {
   836  		return nil, err
   837  	}
   838  
   839  	webHandler, err := cmd.constructWebHandler(logger)
   840  	if err != nil {
   841  		return nil, err
   842  	}
   843  
   844  	authHandler, err := cmd.constructAuthHandler(
   845  		logger,
   846  		storage,
   847  		dbAccessTokenFactory,
   848  		userFactory,
   849  	)
   850  	if err != nil {
   851  		return nil, err
   852  	}
   853  
   854  	loginHandler, err := cmd.constructLoginHandler(
   855  		logger,
   856  		httpClient,
   857  		middleware,
   858  	)
   859  	if err != nil {
   860  		return nil, err
   861  	}
   862  
   863  	legacyHandler, err := cmd.constructLegacyHandler(
   864  		logger,
   865  	)
   866  	if err != nil {
   867  		return nil, err
   868  	}
   869  
   870  	var httpHandler, httpsHandler http.Handler
   871  	if cmd.isTLSEnabled() {
   872  		httpHandler = cmd.constructHTTPHandler(
   873  			logger,
   874  
   875  			tlsRedirectHandler{
   876  				matchHostname: cmd.ExternalURL.URL.Hostname(),
   877  				externalHost:  cmd.ExternalURL.URL.Host,
   878  				baseHandler:   webHandler,
   879  			},
   880  
   881  			// note: intentionally not wrapping API; redirecting is more trouble than
   882  			// it's worth.
   883  
   884  			// we're mainly interested in having the web UI consistently https:// -
   885  			// API requests will likely not respect the redirected https:// URI upon
   886  			// the next request, plus the payload will have already been sent in
   887  			// plaintext
   888  			apiHandler,
   889  
   890  			tlsRedirectHandler{
   891  				matchHostname: cmd.ExternalURL.URL.Hostname(),
   892  				externalHost:  cmd.ExternalURL.URL.Host,
   893  				baseHandler:   authHandler,
   894  			},
   895  			tlsRedirectHandler{
   896  				matchHostname: cmd.ExternalURL.URL.Hostname(),
   897  				externalHost:  cmd.ExternalURL.URL.Host,
   898  				baseHandler:   loginHandler,
   899  			},
   900  			tlsRedirectHandler{
   901  				matchHostname: cmd.ExternalURL.URL.Hostname(),
   902  				externalHost:  cmd.ExternalURL.URL.Host,
   903  				baseHandler:   legacyHandler,
   904  			},
   905  			middleware,
   906  		)
   907  
   908  		httpsHandler = cmd.constructHTTPHandler(
   909  			logger,
   910  			webHandler,
   911  			apiHandler,
   912  			authHandler,
   913  			loginHandler,
   914  			legacyHandler,
   915  			middleware,
   916  		)
   917  	} else {
   918  		httpHandler = cmd.constructHTTPHandler(
   919  			logger,
   920  			webHandler,
   921  			apiHandler,
   922  			authHandler,
   923  			loginHandler,
   924  			legacyHandler,
   925  			middleware,
   926  		)
   927  	}
   928  
   929  	members := []grouper.Member{
   930  		{Name: "debug", Runner: http_server.New(
   931  			cmd.debugBindAddr(),
   932  			http.DefaultServeMux,
   933  		)},
   934  		{Name: "web", Runner: http_server.New(
   935  			cmd.nonTLSBindAddr(),
   936  			httpHandler,
   937  		)},
   938  	}
   939  
   940  	if httpsHandler != nil {
   941  		tlsConfig, err := cmd.tlsConfig(logger, dbConn)
   942  		if err != nil {
   943  			return nil, err
   944  		}
   945  		members = append(members, grouper.Member{Name: "web-tls", Runner: http_server.NewTLSServer(
   946  			cmd.tlsBindAddr(),
   947  			httpsHandler,
   948  			tlsConfig,
   949  		)})
   950  	}
   951  
   952  	return members, nil
   953  }
   954  
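// backendComponents returns the lidar scanner, the scheduler, the build
// tracker, and the build-log reaper, plus a syslog drainer when a syslog
// address is configured, each paired with the interval it should run on.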
   955  func (cmd *RunCommand) backendComponents(
   956  	logger lager.Logger,
   957  	dbConn db.Conn,
   958  	lockFactory lock.LockFactory,
   959  	secretManager creds.Secrets,
   960  	policyChecker policy.Checker,
   961  ) ([]RunnableComponent, error) {
   962  
   963  	if cmd.Syslog.Address != "" && cmd.Syslog.Transport == "" {
    964  		return nil, fmt.Errorf("syslog drainer is misconfigured: cannot configure a drainer without a transport")
   965  	}
   966  
   967  	syslogDrainConfigured := true
   968  	if cmd.Syslog.Address == "" {
   969  		syslogDrainConfigured = false
   970  	}
   971  
   972  	teamFactory := db.NewTeamFactory(dbConn, lockFactory)
   973  
   974  	resourceFactory := resource.NewResourceFactory()
   975  	dbResourceCacheFactory := db.NewResourceCacheFactory(dbConn, lockFactory)
   976  	fetchSourceFactory := worker.NewFetchSourceFactory(dbResourceCacheFactory)
   977  	resourceFetcher := worker.NewFetcher(clock.NewClock(), lockFactory, fetchSourceFactory)
   978  	dbResourceConfigFactory := db.NewResourceConfigFactory(dbConn, lockFactory)
   979  
   980  	dbBuildFactory := db.NewBuildFactory(dbConn, lockFactory, cmd.GC.OneOffBuildGracePeriod, cmd.GC.FailedGracePeriod)
   981  	dbCheckFactory := db.NewCheckFactory(dbConn, lockFactory, secretManager, cmd.varSourcePool, db.CheckDurations{
   982  		Interval:            cmd.ResourceCheckingInterval,
   983  		IntervalWithWebhook: cmd.ResourceWithWebhookCheckingInterval,
   984  		Timeout:             cmd.GlobalResourceCheckTimeout,
   985  	})
   986  	dbPipelineFactory := db.NewPipelineFactory(dbConn, lockFactory)
   987  	dbJobFactory := db.NewJobFactory(dbConn, lockFactory)
   988  	dbPipelineLifecycle := db.NewPipelineLifecycle(dbConn, lockFactory)
   989  
   990  	alg := algorithm.New(db.NewVersionsDB(dbConn, algorithmLimitRows, schedulerCache))
   991  
   992  	dbWorkerBaseResourceTypeFactory := db.NewWorkerBaseResourceTypeFactory(dbConn)
   993  	dbTaskCacheFactory := db.NewTaskCacheFactory(dbConn)
   994  	dbWorkerTaskCacheFactory := db.NewWorkerTaskCacheFactory(dbConn)
   995  	dbVolumeRepository := db.NewVolumeRepository(dbConn)
   996  	dbWorkerFactory := db.NewWorkerFactory(dbConn)
   997  	workerVersion, err := workerVersion()
   998  	if err != nil {
   999  		return nil, err
  1000  	}
  1001  
  1002  	var compressionLib compression.Compression
  1003  	if cmd.StreamingArtifactsCompression == "zstd" {
  1004  		compressionLib = compression.NewZstdCompression()
  1005  	} else {
  1006  		compressionLib = compression.NewGzipCompression()
  1007  	}
  1008  	workerProvider := worker.NewDBWorkerProvider(
  1009  		lockFactory,
  1010  		retryhttp.NewExponentialBackOffFactory(5*time.Minute),
  1011  		resourceFetcher,
  1012  		image.NewImageFactory(),
  1013  		dbResourceCacheFactory,
  1014  		dbResourceConfigFactory,
  1015  		dbWorkerBaseResourceTypeFactory,
  1016  		dbTaskCacheFactory,
  1017  		dbWorkerTaskCacheFactory,
  1018  		dbVolumeRepository,
  1019  		teamFactory,
  1020  		dbWorkerFactory,
  1021  		workerVersion,
  1022  		cmd.BaggageclaimResponseHeaderTimeout,
  1023  		cmd.GardenRequestTimeout,
  1024  	)
  1025  
  1026  	pool := worker.NewPool(workerProvider)
  1027  	workerClient := worker.NewClient(pool,
  1028  		workerProvider,
  1029  		compressionLib,
  1030  		workerAvailabilityPollingInterval,
  1031  		workerStatusPublishInterval,
  1032  		cmd.FeatureFlags.EnableP2PVolumeStreaming,
  1033  		cmd.P2pVolumeStreamingTimeout,
  1034  	)
  1035  
  1036  	defaultLimits, err := cmd.parseDefaultLimits()
  1037  	if err != nil {
  1038  		return nil, err
  1039  	}
  1040  
  1041  	buildContainerStrategy, err := cmd.chooseBuildContainerStrategy()
  1042  	if err != nil {
  1043  		return nil, err
  1044  	}
  1045  
  1046  	rateLimiter := db.NewResourceCheckRateLimiter(
  1047  		rate.Limit(cmd.MaxChecksPerSecond),
  1048  		cmd.ResourceCheckingInterval,
  1049  		dbConn,
  1050  		time.Minute,
  1051  		clock.NewClock(),
  1052  	)
  1053  
  1054  	engine := cmd.constructEngine(
  1055  		pool,
  1056  		workerClient,
  1057  		resourceFactory,
  1058  		teamFactory,
  1059  		dbBuildFactory,
  1060  		dbResourceCacheFactory,
  1061  		dbResourceConfigFactory,
  1062  		secretManager,
  1063  		defaultLimits,
  1064  		buildContainerStrategy,
  1065  		lockFactory,
  1066  		rateLimiter,
  1067  		policyChecker,
  1068  	)
  1069  
   1070  	// In case a user configures resource-checking-interval but forgets to
   1071  	// configure resource-with-webhook-checking-interval, keep both checking
   1072  	// intervals consistent. Even if both intervals are configured, there is no
   1073  	// reason for webhooked resources to use a shorter checking interval than
   1074  	// normal resources.
  1075  	if cmd.ResourceWithWebhookCheckingInterval < cmd.ResourceCheckingInterval {
  1076  		logger.Info("update-resource-with-webhook-checking-interval",
  1077  			lager.Data{
  1078  				"oldValue": cmd.ResourceWithWebhookCheckingInterval,
  1079  				"newValue": cmd.ResourceCheckingInterval,
  1080  			})
  1081  		cmd.ResourceWithWebhookCheckingInterval = cmd.ResourceCheckingInterval
  1082  	}
  1083  
  1084  	components := []RunnableComponent{
  1085  		{
  1086  			Component: atc.Component{
  1087  				Name:     atc.ComponentLidarScanner,
  1088  				Interval: cmd.LidarScannerInterval,
  1089  			},
  1090  			Runnable: lidar.NewScanner(dbCheckFactory),
  1091  		},
  1092  		{
  1093  			Component: atc.Component{
  1094  				Name:     atc.ComponentScheduler,
  1095  				Interval: 10 * time.Second,
  1096  			},
  1097  			Runnable: scheduler.NewRunner(
  1098  				logger.Session("scheduler"),
  1099  				dbJobFactory,
  1100  				&scheduler.Scheduler{
  1101  					Algorithm: alg,
  1102  					BuildStarter: scheduler.NewBuildStarter(
  1103  						builds.NewPlanner(
  1104  							atc.NewPlanFactory(time.Now().Unix()),
  1105  						),
  1106  						alg),
  1107  				},
  1108  				cmd.JobSchedulingMaxInFlight,
  1109  			),
  1110  		},
  1111  		{
  1112  			Component: atc.Component{
  1113  				Name:     atc.ComponentBuildTracker,
  1114  				Interval: cmd.BuildTrackerInterval,
  1115  			},
  1116  			Runnable: builds.NewTracker(dbBuildFactory, engine),
  1117  		},
  1118  		{
  1119  			Component: atc.Component{
  1120  				Name:     atc.ComponentBuildReaper,
  1121  				Interval: 30 * time.Second,
  1122  			},
  1123  			Runnable: gc.NewBuildLogCollector(
  1124  				dbPipelineFactory,
  1125  				dbPipelineLifecycle,
  1126  				500,
  1127  				gc.NewBuildLogRetentionCalculator(
  1128  					cmd.DefaultBuildLogsToRetain,
  1129  					cmd.MaxBuildLogsToRetain,
  1130  					cmd.DefaultDaysToRetainBuildLogs,
  1131  					cmd.MaxDaysToRetainBuildLogs,
  1132  				),
  1133  				syslogDrainConfigured,
  1134  			),
  1135  		},
  1136  	}
  1137  
  1138  	if syslogDrainConfigured {
  1139  		components = append(components, RunnableComponent{
  1140  			Component: atc.Component{
  1141  				Name:     atc.ComponentSyslogDrainer,
  1142  				Interval: cmd.Syslog.DrainInterval,
  1143  			},
  1144  			Runnable: syslog.NewDrainer(
  1145  				cmd.Syslog.Transport,
  1146  				cmd.Syslog.Address,
  1147  				cmd.Syslog.Hostname,
  1148  				cmd.Syslog.CACerts,
  1149  				dbBuildFactory,
  1150  			),
  1151  		})
  1152  	}
  1153  
  1154  	return components, err
  1155  }
  1156  
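// gcComponents returns one RunnableComponent per garbage collector (builds,
// workers, resource configs, resource caches and cache uses, artifacts,
// volumes, containers, check sessions, pipelines, access tokens), all sharing
// the configured GC interval.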
  1157  func (cmd *RunCommand) gcComponents(
  1158  	logger lager.Logger,
  1159  	gcConn db.Conn,
  1160  	lockFactory lock.LockFactory,
  1161  ) ([]RunnableComponent, error) {
  1162  	dbWorkerLifecycle := db.NewWorkerLifecycle(gcConn)
  1163  	dbResourceCacheLifecycle := db.NewResourceCacheLifecycle(gcConn)
  1164  	dbContainerRepository := db.NewContainerRepository(gcConn)
  1165  	dbArtifactLifecycle := db.NewArtifactLifecycle(gcConn)
  1166  	dbAccessTokenLifecycle := db.NewAccessTokenLifecycle(gcConn)
  1167  	resourceConfigCheckSessionLifecycle := db.NewResourceConfigCheckSessionLifecycle(gcConn)
  1168  	dbBuildFactory := db.NewBuildFactory(gcConn, lockFactory, cmd.GC.OneOffBuildGracePeriod, cmd.GC.FailedGracePeriod)
  1169  	dbResourceConfigFactory := db.NewResourceConfigFactory(gcConn, lockFactory)
  1170  	dbPipelineLifecycle := db.NewPipelineLifecycle(gcConn, lockFactory)
  1171  
  1172  	dbVolumeRepository := db.NewVolumeRepository(gcConn)
  1173  
   1174  	// set the 'unreferenced resource config' grace period to be longer than
  1175  	// the check timeout, just to make sure it doesn't get removed out from under
  1176  	// a running check
  1177  	//
  1178  	// 5 minutes is arbitrary - this really shouldn't matter a whole lot, but
  1179  	// exposing a config specifically for it is a little risky, since you don't
  1180  	// want to set it too low.
  1181  	unreferencedConfigGracePeriod := cmd.GlobalResourceCheckTimeout + 5*time.Minute
  1182  
  1183  	collectors := map[string]component.Runnable{
  1184  		atc.ComponentCollectorBuilds:            gc.NewBuildCollector(dbBuildFactory),
  1185  		atc.ComponentCollectorWorkers:           gc.NewWorkerCollector(dbWorkerLifecycle),
  1186  		atc.ComponentCollectorResourceConfigs:   gc.NewResourceConfigCollector(dbResourceConfigFactory, unreferencedConfigGracePeriod),
  1187  		atc.ComponentCollectorResourceCaches:    gc.NewResourceCacheCollector(dbResourceCacheLifecycle),
  1188  		atc.ComponentCollectorResourceCacheUses: gc.NewResourceCacheUseCollector(dbResourceCacheLifecycle),
  1189  		atc.ComponentCollectorArtifacts:         gc.NewArtifactCollector(dbArtifactLifecycle),
  1190  		atc.ComponentCollectorVolumes:           gc.NewVolumeCollector(dbVolumeRepository, cmd.GC.MissingGracePeriod),
  1191  		atc.ComponentCollectorContainers:        gc.NewContainerCollector(dbContainerRepository, cmd.GC.MissingGracePeriod, cmd.GC.HijackGracePeriod),
  1192  		atc.ComponentCollectorCheckSessions:     gc.NewResourceConfigCheckSessionCollector(resourceConfigCheckSessionLifecycle),
  1193  		atc.ComponentCollectorPipelines:         gc.NewPipelineCollector(dbPipelineLifecycle),
  1194  		atc.ComponentCollectorAccessTokens:      gc.NewAccessTokensCollector(dbAccessTokenLifecycle, jwt.DefaultLeeway),
  1195  	}
  1196  
  1197  	var components []RunnableComponent
  1198  	for collectorName, collector := range collectors {
  1199  		components = append(components, RunnableComponent{
  1200  			Component: atc.Component{
  1201  				Name:     collectorName,
  1202  				Interval: cmd.GC.Interval,
  1203  			},
  1204  			Runnable: collector,
  1205  		})
  1206  	}
  1207  
  1208  	return components, nil
  1209  }
  1210  
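// validateCustomRoles checks that every role and action named in the RBAC
// config file is known to the accessor package. The file maps each role to the
// actions it should cover, e.g. (illustrative role/action names):
//
//	member:
//	  - GetPipeline
//	  - ListJobs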
  1211  func (cmd *RunCommand) validateCustomRoles() error {
  1212  	path := cmd.ConfigRBAC.Path()
  1213  	if path == "" {
  1214  		return nil
  1215  	}
  1216  
  1217  	content, err := ioutil.ReadFile(path)
  1218  	if err != nil {
  1219  		return fmt.Errorf("failed to open RBAC config file (%s): %w", cmd.ConfigRBAC, err)
  1220  	}
  1221  
  1222  	var data map[string][]string
  1223  	if err = yaml.Unmarshal(content, &data); err != nil {
  1224  		return fmt.Errorf("failed to parse RBAC config file (%s): %w", cmd.ConfigRBAC, err)
  1225  	}
  1226  
  1227  	allKnownRoles := map[string]bool{}
  1228  	for _, roleName := range accessor.DefaultRoles {
  1229  		allKnownRoles[roleName] = true
  1230  	}
  1231  
  1232  	for role, actions := range data {
  1233  		if _, ok := allKnownRoles[role]; !ok {
  1234  			return fmt.Errorf("failed to customize roles: %w", fmt.Errorf("unknown role %s", role))
  1235  		}
  1236  
  1237  		for _, action := range actions {
  1238  			if _, ok := accessor.DefaultRoles[action]; !ok {
  1239  				return fmt.Errorf("failed to customize roles: %w", fmt.Errorf("unknown action %s", action))
  1240  			}
  1241  		}
  1242  	}
  1243  
  1244  	return nil
  1245  }
  1246  
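// parseCustomRoles flattens the role -> actions RBAC config into an
// action -> role mapping.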
  1247  func (cmd *RunCommand) parseCustomRoles() (map[string]string, error) {
  1248  	mapping := map[string]string{}
  1249  
  1250  	path := cmd.ConfigRBAC.Path()
  1251  	if path == "" {
  1252  		return mapping, nil
  1253  	}
  1254  
  1255  	content, err := ioutil.ReadFile(path)
  1256  	if err != nil {
  1257  		return nil, err
  1258  	}
  1259  
  1260  	var data map[string][]string
  1261  	if err = yaml.Unmarshal(content, &data); err != nil {
  1262  		return nil, err
  1263  	}
  1264  
  1265  	for role, actions := range data {
  1266  		for _, action := range actions {
  1267  			mapping[action] = role
  1268  		}
  1269  	}
  1270  
  1271  	return mapping, nil
  1272  }
  1273  
  1274  func workerVersion() (version.Version, error) {
  1275  	return version.NewVersionFromString(concourse.WorkerVersion)
  1276  }
  1277  
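// secretManager initializes the first configured credential manager, falling
// back to a no-op secrets factory when none is configured, and wraps the
// result with the credential management settings via NewSecrets.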
  1278  func (cmd *RunCommand) secretManager(logger lager.Logger) (creds.Secrets, error) {
  1279  	var secretsFactory creds.SecretsFactory = noop.NewNoopFactory()
  1280  	for name, manager := range cmd.CredentialManagers {
  1281  		if !manager.IsConfigured() {
  1282  			continue
  1283  		}
  1284  
  1285  		credsLogger := logger.Session("credential-manager", lager.Data{
  1286  			"name": name,
  1287  		})
  1288  
  1289  		credsLogger.Info("configured credentials manager")
  1290  
  1291  		err := manager.Init(credsLogger)
  1292  		if err != nil {
  1293  			return nil, err
  1294  		}
  1295  
  1296  		err = manager.Validate()
  1297  		if err != nil {
  1298  			return nil, fmt.Errorf("credential manager '%s' misconfigured: %s", name, err)
  1299  		}
  1300  
  1301  		secretsFactory, err = manager.NewSecretsFactory(credsLogger)
  1302  		if err != nil {
  1303  			return nil, err
  1304  		}
  1305  
  1306  		break
  1307  	}
  1308  
  1309  	return cmd.CredentialManagement.NewSecrets(secretsFactory), nil
  1310  }
  1311  
  1312  func (cmd *RunCommand) newKey() *encryption.Key {
  1313  	var newKey *encryption.Key
  1314  	if cmd.EncryptionKey.AEAD != nil {
  1315  		newKey = encryption.NewKey(cmd.EncryptionKey.AEAD)
  1316  	}
  1317  	return newKey
  1318  }
  1319  
  1320  func (cmd *RunCommand) oldKey() *encryption.Key {
  1321  	var oldKey *encryption.Key
  1322  	if cmd.OldEncryptionKey.AEAD != nil {
  1323  		oldKey = encryption.NewKey(cmd.OldEncryptionKey.AEAD)
  1324  	}
  1325  	return oldKey
  1326  }
  1327  
  1328  func (cmd *RunCommand) constructWebHandler(logger lager.Logger) (http.Handler, error) {
  1329  	webHandler, err := web.NewHandler(logger)
  1330  	if err != nil {
  1331  		return nil, err
  1332  	}
  1333  	return metric.WrapHandler(logger, metric.Metrics, "web", webHandler), nil
  1334  }
  1335  
  1336  func (cmd *RunCommand) skyHttpClient() (*http.Client, error) {
        	// use a fresh client rather than mutating the shared
        	// http.DefaultClient's transport below
  1337  	httpClient := &http.Client{}
  1338  
  1339  	if cmd.isTLSEnabled() {
  1340  		certpool, err := x509.SystemCertPool()
  1341  		if err != nil {
  1342  			return nil, err
  1343  		}
  1344  
  1345  		if !cmd.LetsEncrypt.Enable {
  1346  			cert, err := tls.LoadX509KeyPair(string(cmd.TLSCert), string(cmd.TLSKey))
  1347  			if err != nil {
  1348  				return nil, err
  1349  			}
  1350  
  1351  			x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
  1352  			if err != nil {
  1353  				return nil, err
  1354  			}
  1355  
  1356  			certpool.AddCert(x509Cert)
  1357  		}
  1358  
  1359  		httpClient.Transport = &http.Transport{
  1360  			TLSClientConfig: &tls.Config{
  1361  				RootCAs: certpool,
  1362  			},
  1363  		}
  1364  	} else {
  1365  		httpClient.Transport = http.DefaultTransport
  1366  	}
  1367  
  1368  	httpClient.Transport = mitmRoundTripper{
  1369  		RoundTripper: httpClient.Transport,
  1370  
  1371  		SourceHost: cmd.ExternalURL.URL.Host,
  1372  		TargetURL:  cmd.DefaultURL().URL,
  1373  	}
  1374  
  1375  	return httpClient, nil
  1376  }
  1377  
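        // mitmRoundTripper rewrites requests bound for SourceHost (the external
        // URL's host) to TargetURL (the local listener) before delegating to the
        // wrapped RoundTripper.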
  1378  type mitmRoundTripper struct {
  1379  	http.RoundTripper
  1380  
  1381  	SourceHost string
  1382  	TargetURL  *url.URL
  1383  }
  1384  
  1385  func (tripper mitmRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
  1386  	if req.URL.Host == tripper.SourceHost {
  1387  		req.URL.Scheme = tripper.TargetURL.Scheme
  1388  		req.URL.Host = tripper.TargetURL.Host
  1389  	}
  1390  
  1391  	return tripper.RoundTripper.RoundTrip(req)
  1392  }
  1393  
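        // tlsConfig builds the server's TLS configuration: certificates come
        // either from the Let's Encrypt autocert manager (with its cache stored in
        // the database) or from the --tls-cert/--tls-key pair.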
  1394  func (cmd *RunCommand) tlsConfig(logger lager.Logger, dbConn db.Conn) (*tls.Config, error) {
  1395  	// start from the shared default TLS configuration
  1396  	tlsConfig := atc.DefaultTLSConfig()
  1397  
  1398  	if cmd.isTLSEnabled() {
  1399  		tlsLogger := logger.Session("tls-enabled")
  1400  		if cmd.LetsEncrypt.Enable {
  1401  			tlsLogger.Debug("using-autocert-manager")
  1402  
  1403  			cache, err := newDbCache(dbConn)
  1404  			if err != nil {
  1405  				return nil, err
  1406  			}
  1407  			m := autocert.Manager{
  1408  				Prompt:     autocert.AcceptTOS,
  1409  				Cache:      cache,
  1410  				HostPolicy: autocert.HostWhitelist(cmd.ExternalURL.URL.Hostname()),
  1411  				Client:     &acme.Client{DirectoryURL: cmd.LetsEncrypt.ACMEURL.String()},
  1412  			}
  1413  			tlsConfig.NextProtos = append(tlsConfig.NextProtos, acme.ALPNProto)
  1414  			tlsConfig.GetCertificate = m.GetCertificate
  1415  		} else {
  1416  			tlsLogger.Debug("loading-tls-certs")
  1417  			cert, err := tls.LoadX509KeyPair(string(cmd.TLSCert), string(cmd.TLSKey))
  1418  			if err != nil {
  1419  				return nil, err
  1420  			}
  1421  			tlsConfig.Certificates = []tls.Certificate{cert}
  1422  		}
  1423  	}
  1424  	return tlsConfig, nil
  1425  }
  1426  
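        // parseDefaultLimits converts the optional default CPU and memory limit
        // flags into atc.ContainerLimits.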
  1427  func (cmd *RunCommand) parseDefaultLimits() (atc.ContainerLimits, error) {
  1428  	limits := atc.ContainerLimits{}
  1429  	if cmd.DefaultCpuLimit != nil {
  1430  		cpu := atc.CPULimit(*cmd.DefaultCpuLimit)
  1431  		limits.CPU = &cpu
  1432  	}
  1433  	if cmd.DefaultMemoryLimit != nil {
  1434  		memory, err := atc.ParseMemoryLimit(*cmd.DefaultMemoryLimit)
  1435  		if err != nil {
  1436  			return atc.ContainerLimits{}, err
  1437  		}
  1438  		limits.Memory = &memory
  1439  	}
  1440  	return limits, nil
  1441  }
  1442  
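        // defaultBindIP returns an address the ATC can use to reach itself;
        // binding to 0.0.0.0 is translated to 127.0.0.1.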
  1443  func (cmd *RunCommand) defaultBindIP() net.IP {
  1444  	ip := cmd.BindIP.String()
  1445  	if ip == "0.0.0.0" {
  1446  		ip = "127.0.0.1"
  1447  	}
  1448  
  1449  	return net.ParseIP(ip)
  1450  }
  1451  
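        // DefaultURL returns the plain-HTTP URL of the local listener, used as the
        // target for loopback requests.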
  1452  func (cmd *RunCommand) DefaultURL() flag.URL {
  1453  	return flag.URL{
  1454  		URL: &url.URL{
  1455  			Scheme: "http",
  1456  			Host:   fmt.Sprintf("%s:%d", cmd.defaultBindIP().String(), cmd.BindPort),
  1457  		},
  1458  	}
  1459  }
  1460  
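        // run wraps a runner so that onReady fires once the wrapped process
        // reports readiness and onExit fires when it exits; signals are forwarded
        // to the wrapped process.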
  1461  func run(runner ifrit.Runner, onReady func(), onExit func()) ifrit.Runner {
  1462  	return ifrit.RunFunc(func(signals <-chan os.Signal, ready chan<- struct{}) error {
  1463  		process := ifrit.Background(runner)
  1464  
  1465  		subExited := process.Wait()
  1466  		subReady := process.Ready()
  1467  
  1468  		for {
  1469  			select {
  1470  			case <-subReady:
  1471  				onReady()
  1472  				close(ready)
  1473  				subReady = nil
  1474  			case err := <-subExited:
  1475  				onExit()
  1476  				return err
  1477  			case sig := <-signals:
  1478  				process.Signal(sig)
  1479  			}
  1480  		}
  1481  	})
  1482  }
  1483  
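        // validate checks that the TLS flags form a consistent combination (a TLS
        // bind port plus either a cert/key pair or Let's Encrypt, with an https
        // external URL) and that any custom RBAC roles refer to known roles and
        // actions. An illustrative, hypothetical invocation that passes the TLS
        // checks:
        //
        //   concourse web --external-url https://ci.example.com \
        //     --tls-bind-port 443 --tls-cert /etc/ssl/ci.crt --tls-key /etc/ssl/ci.key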
  1484  func (cmd *RunCommand) validate() error {
  1485  	var errs *multierror.Error
  1486  
  1487  	switch {
  1488  	case cmd.TLSBindPort == 0:
  1489  		if cmd.TLSCert != "" || cmd.TLSKey != "" || cmd.LetsEncrypt.Enable {
  1490  			errs = multierror.Append(
  1491  				errs,
  1492  				errors.New("must specify --tls-bind-port to use TLS"),
  1493  			)
  1494  		}
  1495  	case cmd.LetsEncrypt.Enable:
  1496  		if cmd.TLSCert != "" || cmd.TLSKey != "" {
  1497  			errs = multierror.Append(
  1498  				errs,
  1499  				errors.New("cannot specify --enable-lets-encrypt if --tls-cert or --tls-key are set"),
  1500  			)
  1501  		}
  1502  	case cmd.TLSCert != "" && cmd.TLSKey != "":
  1503  		if cmd.ExternalURL.URL.Scheme != "https" {
  1504  			errs = multierror.Append(
  1505  				errs,
  1506  				errors.New("must specify HTTPS external-url to use TLS"),
  1507  			)
  1508  		}
  1509  	default:
  1510  		errs = multierror.Append(
  1511  			errs,
  1512  			errors.New("must specify --tls-cert and --tls-key, or --enable-lets-encrypt to use TLS"),
  1513  		)
  1514  	}
  1515  
  1516  	if err := cmd.validateCustomRoles(); err != nil {
  1517  		errs = multierror.Append(errs, err)
  1518  	}
  1519  
  1520  	return errs.ErrorOrNil()
  1521  }
  1522  
  1523  func (cmd *RunCommand) nonTLSBindAddr() string {
  1524  	return fmt.Sprintf("%s:%d", cmd.BindIP, cmd.BindPort)
  1525  }
  1526  
  1527  func (cmd *RunCommand) tlsBindAddr() string {
  1528  	return fmt.Sprintf("%s:%d", cmd.BindIP, cmd.TLSBindPort)
  1529  }
  1530  
  1531  func (cmd *RunCommand) debugBindAddr() string {
  1532  	return fmt.Sprintf("%s:%d", cmd.DebugBindIP, cmd.DebugBindPort)
  1533  }
  1534  
  1535  func (cmd *RunCommand) configureMetrics(logger lager.Logger) error {
  1536  	host := cmd.Metrics.HostName
  1537  	if host == "" {
  1538  		host, _ = os.Hostname()
  1539  	}
  1540  
  1541  	return metric.Metrics.Initialize(logger.Session("metrics"), host, cmd.Metrics.Attributes, cmd.Metrics.BufferSize)
  1542  }
  1543  
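        // constructDBConn opens the main database connection pool, instruments it
        // with query counting and (optionally) query logging, and applies the pool
        // size limits.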
  1544  func (cmd *RunCommand) constructDBConn(
  1545  	driverName string,
  1546  	logger lager.Logger,
  1547  	maxConns int,
  1548  	idleConns int,
  1549  	connectionName string,
  1550  	lockFactory lock.LockFactory,
  1551  ) (db.Conn, error) {
  1552  	dbConn, err := db.Open(logger.Session("db"), driverName, cmd.Postgres.ConnectionString(), cmd.newKey(), cmd.oldKey(), connectionName, lockFactory)
  1553  	if err != nil {
  1554  		return nil, fmt.Errorf("failed to migrate database: %s", err)
  1555  	}
  1556  
  1557  	// Instrument with Metrics
  1558  	dbConn = metric.CountQueries(dbConn)
  1559  	metric.Metrics.Databases = append(metric.Metrics.Databases, dbConn)
  1560  
  1561  	// Instrument with Logging
  1562  	if cmd.LogDBQueries {
  1563  		dbConn = db.Log(logger.Session("log-conn"), dbConn)
  1564  	}
  1565  
  1566  	// Prepare
  1567  	dbConn.SetMaxOpenConns(maxConns)
  1568  	dbConn.SetMaxIdleConns(idleConns)
  1569  
  1570  	return dbConn, nil
  1571  }
  1572  
  1573  type Closer interface {
  1574  	Close() error
  1575  }
  1576  
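        // constructLockConn opens a dedicated database connection for taking
        // locks, pinned to a single open/idle connection with no maximum lifetime.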
  1577  func (cmd *RunCommand) constructLockConn(driverName string) (*sql.DB, error) {
  1578  	dbConn, err := sql.Open(driverName, cmd.Postgres.ConnectionString())
  1579  	if err != nil {
  1580  		return nil, err
  1581  	}
  1582  
  1583  	dbConn.SetMaxOpenConns(1)
  1584  	dbConn.SetMaxIdleConns(1)
  1585  	dbConn.SetConnMaxLifetime(0)
  1586  
  1587  	return dbConn, nil
  1588  }
  1589  
  1590  func (cmd *RunCommand) chooseBuildContainerStrategy() (worker.ContainerPlacementStrategy, error) {
  1591  	return worker.NewContainerPlacementStrategy(cmd.ContainerPlacementStrategyOptions)
  1592  }
  1593  
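        // configureAuthForDefaultTeam applies the main-team auth flags to the
        // default team, which must already exist in the database.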
  1594  func (cmd *RunCommand) configureAuthForDefaultTeam(teamFactory db.TeamFactory) error {
  1595  	team, found, err := teamFactory.FindTeam(atc.DefaultTeamName)
  1596  	if err != nil {
  1597  		return err
  1598  	}
  1599  
  1600  	if !found {
  1601  		return errors.New("default team not found")
  1602  	}
  1603  
  1604  	auth, err := cmd.Auth.MainTeamFlags.Format()
  1605  	if err != nil {
  1606  		return fmt.Errorf("default team auth not configured: %v", err)
  1607  	}
  1608  
  1609  	err = team.UpdateProviderAuth(auth)
  1610  	if err != nil {
  1611  		return err
  1612  	}
  1613  
  1614  	return nil
  1615  }
  1616  
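        // constructEngine wires the worker pool, resource and build factories,
        // default container limits, and placement strategy into the step factory,
        // producing the engine that executes build plans with the given secrets
        // and var source pool.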
  1617  func (cmd *RunCommand) constructEngine(
  1618  	workerPool worker.Pool,
  1619  	workerClient worker.Client,
  1620  	resourceFactory resource.ResourceFactory,
  1621  	teamFactory db.TeamFactory,
  1622  	buildFactory db.BuildFactory,
  1623  	resourceCacheFactory db.ResourceCacheFactory,
  1624  	resourceConfigFactory db.ResourceConfigFactory,
  1625  	secretManager creds.Secrets,
  1626  	defaultLimits atc.ContainerLimits,
  1627  	strategy worker.ContainerPlacementStrategy,
  1628  	lockFactory lock.LockFactory,
  1629  	rateLimiter engine.RateLimiter,
  1630  	policyChecker policy.Checker,
  1631  ) engine.Engine {
  1632  	return engine.NewEngine(
  1633  		engine.NewStepperFactory(
  1634  			engine.NewCoreStepFactory(
  1635  				workerPool,
  1636  				workerClient,
  1637  				resourceFactory,
  1638  				teamFactory,
  1639  				buildFactory,
  1640  				resourceCacheFactory,
  1641  				resourceConfigFactory,
  1642  				defaultLimits,
  1643  				strategy,
  1644  				lockFactory,
  1645  				cmd.GlobalResourceCheckTimeout,
  1646  			),
  1647  			cmd.ExternalURL.String(),
  1648  			rateLimiter,
  1649  			policyChecker,
  1650  		),
  1651  		secretManager,
  1652  		cmd.varSourcePool,
  1653  	)
  1654  }
  1655  
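        // constructHTTPHandler assembles the top-level mux: the API (with CSRF
        // validation), the token issuer under /sky/issuer/, the login flow under
        // /sky/, the legacy auth routes, and the web UI for everything else. The
        // mux is wrapped with security headers, auth-cookie handling, and request
        // logging.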
  1656  func (cmd *RunCommand) constructHTTPHandler(
  1657  	logger lager.Logger,
  1658  	webHandler http.Handler,
  1659  	apiHandler http.Handler,
  1660  	authHandler http.Handler,
  1661  	loginHandler http.Handler,
  1662  	legacyHandler http.Handler,
  1663  	middleware token.Middleware,
  1664  ) http.Handler {
  1665  
  1666  	csrfHandler := auth.CSRFValidationHandler(
  1667  		apiHandler,
  1668  		middleware,
  1669  	)
  1670  
  1671  	webMux := http.NewServeMux()
  1672  	webMux.Handle("/api/v1/", csrfHandler)
  1673  	webMux.Handle("/sky/issuer/", authHandler)
  1674  	webMux.Handle("/sky/", loginHandler)
  1675  	webMux.Handle("/auth/", legacyHandler)
  1676  	webMux.Handle("/login", legacyHandler)
  1677  	webMux.Handle("/logout", legacyHandler)
  1678  	webMux.Handle("/", webHandler)
  1679  
  1680  	httpHandler := wrappa.LoggerHandler{
  1681  		Logger: logger,
  1682  
  1683  		Handler: wrappa.SecurityHandler{
  1684  			XFrameOptions: cmd.Server.XFrameOptions,
  1685  
  1686  			// proxy Authorization header to/from auth cookie,
  1687  			// to support auth from JS (EventSource) and custom JWT auth
  1688  			Handler: auth.WebAuthHandler{
  1689  				Handler:    webMux,
  1690  				Middleware: middleware,
  1691  			},
  1692  		},
  1693  	}
  1694  
  1695  	return httpHandler
  1696  }
  1697  
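        // constructLegacyHandler builds the handler mounted at the legacy /auth/,
        // /login and /logout routes.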
  1698  func (cmd *RunCommand) constructLegacyHandler(
  1699  	logger lager.Logger,
  1700  ) (http.Handler, error) {
  1701  	return legacyserver.NewLegacyServer(&legacyserver.LegacyConfig{
  1702  		Logger: logger.Session("legacy"),
  1703  	})
  1704  }
  1705  
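        // constructAuthHandler builds the embedded dex identity server (the token
        // issuer) and wraps it so issued access tokens and their users are
        // persisted for later verification by the API.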
  1706  func (cmd *RunCommand) constructAuthHandler(
  1707  	logger lager.Logger,
  1708  	storage storage.Storage,
  1709  	accessTokenFactory db.AccessTokenFactory,
  1710  	userFactory db.UserFactory,
  1711  ) (http.Handler, error) {
  1712  
  1713  	issuerPath, _ := url.Parse("/sky/issuer")
  1714  	redirectPath, _ := url.Parse("/sky/callback")
  1715  
  1716  	issuerURL := cmd.ExternalURL.URL.ResolveReference(issuerPath)
  1717  	redirectURL := cmd.ExternalURL.URL.ResolveReference(redirectPath)
  1718  
  1719  	// Add public fly client
  1720  	cmd.Auth.AuthFlags.Clients[flyClientID] = flyClientSecret
  1721  
  1722  	dexServer, err := dexserver.NewDexServer(&dexserver.DexConfig{
  1723  		Logger:      logger.Session("dex"),
  1724  		Users:       cmd.Auth.AuthFlags.LocalUsers,
  1725  		Clients:     cmd.Auth.AuthFlags.Clients,
  1726  		Expiration:  cmd.Auth.AuthFlags.Expiration,
  1727  		IssuerURL:   issuerURL.String(),
  1728  		RedirectURL: redirectURL.String(),
  1729  		WebHostURL:  "/sky/issuer",
  1730  		SigningKey:  cmd.Auth.AuthFlags.SigningKey.PrivateKey,
  1731  		Storage:     storage,
  1732  	})
  1733  	if err != nil {
  1734  		return nil, err
  1735  	}
  1736  
  1737  	return token.StoreAccessToken(
  1738  		logger.Session("dex-server"),
  1739  		dexServer,
  1740  		token.Factory{},
  1741  		token.NewClaimsParser(),
  1742  		accessTokenFactory,
  1743  		userFactory,
  1744  	), nil
  1745  }
  1746  
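        // constructLoginHandler builds the sky server, which handles browser login
        // by driving an OAuth2 authorization-code flow against the local issuer
        // using the configured client ID and secret.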
  1747  func (cmd *RunCommand) constructLoginHandler(
  1748  	logger lager.Logger,
  1749  	httpClient *http.Client,
  1750  	middleware token.Middleware,
  1751  ) (http.Handler, error) {
  1752  
  1753  	authPath, _ := url.Parse("/sky/issuer/auth")
  1754  	tokenPath, _ := url.Parse("/sky/issuer/token")
  1755  	redirectPath, _ := url.Parse("/sky/callback")
  1756  
  1757  	authURL := cmd.ExternalURL.URL.ResolveReference(authPath)
  1758  	tokenURL := cmd.ExternalURL.URL.ResolveReference(tokenPath)
  1759  	redirectURL := cmd.ExternalURL.URL.ResolveReference(redirectPath)
  1760  
  1761  	endpoint := oauth2.Endpoint{
  1762  		AuthURL:   authURL.String(),
  1763  		TokenURL:  tokenURL.String(),
  1764  		AuthStyle: oauth2.AuthStyleInHeader,
  1765  	}
  1766  
  1767  	oauth2Config := &oauth2.Config{
  1768  		Endpoint:     endpoint,
  1769  		ClientID:     cmd.Server.ClientID,
  1770  		ClientSecret: cmd.Server.ClientSecret,
  1771  		RedirectURL:  redirectURL.String(),
  1772  		Scopes:       []string{"openid", "profile", "email", "federated:id", "groups"},
  1773  	}
  1774  
  1775  	skyServer, err := skyserver.NewSkyServer(&skyserver.SkyConfig{
  1776  		Logger:          logger.Session("sky"),
  1777  		TokenMiddleware: middleware,
  1778  		TokenParser:     token.Factory{},
  1779  		OAuthConfig:     oauth2Config,
  1780  		HTTPClient:      httpClient,
  1781  	})
  1782  	if err != nil {
  1783  		return nil, err
  1784  	}
  1785  
  1786  	return skyserver.NewSkyHandler(skyServer), nil
  1787  }
  1788  
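        // constructTokenVerifier verifies access tokens issued to fly and to any
        // additionally configured clients, with verified claims cached (bounded at
        // 1 MiB) to reduce database lookups.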
  1789  func (cmd *RunCommand) constructTokenVerifier(accessTokenFactory db.AccessTokenFactory) accessor.TokenVerifier {
  1790  
  1791  	validClients := []string{flyClientID}
  1792  	for clientId := range cmd.Auth.AuthFlags.Clients {
  1793  		validClients = append(validClients, clientId)
  1794  	}
  1795  
  1796  	MiB := 1024 * 1024
  1797  	claimsCacher := accessor.NewClaimsCacher(accessTokenFactory, 1*MiB)
  1798  
  1799  	return accessor.NewVerifier(claimsCacher, validClients)
  1800  }
  1801  
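        // constructAPIHandler builds the API handler and its wrapper chain:
        // concurrent request limits, metrics, policy checks, auth, rejection of
        // operations on archived pipelines, version headers, accessor/audit
        // handling, and response compression.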
  1802  func (cmd *RunCommand) constructAPIHandler(
  1803  	logger lager.Logger,
  1804  	reconfigurableSink *lager.ReconfigurableSink,
  1805  	teamFactory db.TeamFactory,
  1806  	workerTeamFactory db.TeamFactory,
  1807  	dbPipelineFactory db.PipelineFactory,
  1808  	dbJobFactory db.JobFactory,
  1809  	dbResourceFactory db.ResourceFactory,
  1810  	dbWorkerFactory db.WorkerFactory,
  1811  	dbVolumeRepository db.VolumeRepository,
  1812  	dbContainerRepository db.ContainerRepository,
  1813  	gcContainerDestroyer gc.Destroyer,
  1814  	dbBuildFactory db.BuildFactory,
  1815  	dbCheckFactory db.CheckFactory,
  1816  	resourceConfigFactory db.ResourceConfigFactory,
  1817  	dbUserFactory db.UserFactory,
  1818  	workerClient worker.Client,
  1819  	secretManager creds.Secrets,
  1820  	credsManagers creds.Managers,
  1821  	accessFactory accessor.AccessFactory,
  1822  	dbWall db.Wall,
  1823  	policyChecker policy.Checker,
  1824  ) (http.Handler, error) {
  1825  
  1826  	checkPipelineAccessHandlerFactory := auth.NewCheckPipelineAccessHandlerFactory(teamFactory)
  1827  	checkBuildReadAccessHandlerFactory := auth.NewCheckBuildReadAccessHandlerFactory(dbBuildFactory)
  1828  	checkBuildWriteAccessHandlerFactory := auth.NewCheckBuildWriteAccessHandlerFactory(dbBuildFactory)
  1829  	checkWorkerTeamAccessHandlerFactory := auth.NewCheckWorkerTeamAccessHandlerFactory(dbWorkerFactory)
  1830  
  1831  	rejectArchivedHandlerFactory := pipelineserver.NewRejectArchivedHandlerFactory(teamFactory)
  1832  
  1833  	aud := auditor.NewAuditor(
  1834  		cmd.Auditor.EnableBuildAuditLog,
  1835  		cmd.Auditor.EnableContainerAuditLog,
  1836  		cmd.Auditor.EnableJobAuditLog,
  1837  		cmd.Auditor.EnablePipelineAuditLog,
  1838  		cmd.Auditor.EnableResourceAuditLog,
  1839  		cmd.Auditor.EnableSystemAuditLog,
  1840  		cmd.Auditor.EnableTeamAuditLog,
  1841  		cmd.Auditor.EnableWorkerAuditLog,
  1842  		cmd.Auditor.EnableVolumeAuditLog,
  1843  		logger,
  1844  	)
  1845  
  1846  	customRoles, err := cmd.parseCustomRoles()
  1847  	if err != nil {
  1848  		return nil, err
  1849  	}
  1850  
  1851  	apiWrapper := wrappa.MultiWrappa{
  1852  		wrappa.NewConcurrentRequestLimitsWrappa(
  1853  			logger,
  1854  			wrappa.NewConcurrentRequestPolicy(cmd.ConcurrentRequestLimits),
  1855  		),
  1856  		wrappa.NewAPIMetricsWrappa(logger),
  1857  		wrappa.NewPolicyCheckWrappa(logger, policychecker.NewApiPolicyChecker(policyChecker)),
  1858  		wrappa.NewAPIAuthWrappa(
  1859  			checkPipelineAccessHandlerFactory,
  1860  			checkBuildReadAccessHandlerFactory,
  1861  			checkBuildWriteAccessHandlerFactory,
  1862  			checkWorkerTeamAccessHandlerFactory,
  1863  		),
  1864  		wrappa.NewRejectArchivedWrappa(rejectArchivedHandlerFactory),
  1865  		wrappa.NewConcourseVersionWrappa(concourse.Version),
  1866  		wrappa.NewAccessorWrappa(
  1867  			logger,
  1868  			accessFactory,
  1869  			aud,
  1870  			customRoles,
  1871  		),
  1872  		wrappa.NewCompressionWrappa(logger),
  1873  	}
  1874  
  1875  	return api.NewHandler(
  1876  		logger,
  1877  		cmd.ExternalURL.String(),
  1878  		cmd.Server.ClusterName,
  1879  		apiWrapper,
  1880  
  1881  		teamFactory,
  1882  		dbPipelineFactory,
  1883  		dbJobFactory,
  1884  		dbResourceFactory,
  1885  		dbWorkerFactory,
  1886  		workerTeamFactory,
  1887  		dbVolumeRepository,
  1888  		dbContainerRepository,
  1889  		gcContainerDestroyer,
  1890  		dbBuildFactory,
  1891  		dbCheckFactory,
  1892  		resourceConfigFactory,
  1893  		dbUserFactory,
  1894  
  1895  		buildserver.NewEventHandler,
  1896  
  1897  		workerClient,
  1898  
  1899  		reconfigurableSink,
  1900  
  1901  		cmd.isTLSEnabled(),
  1902  
  1903  		cmd.CLIArtifactsDir.Path(),
  1904  		concourse.Version,
  1905  		concourse.WorkerVersion,
  1906  		secretManager,
  1907  		cmd.varSourcePool,
  1908  		credsManagers,
  1909  		containerserver.NewInterceptTimeoutFactory(cmd.InterceptIdleTimeout),
  1910  		time.Minute,
  1911  		dbWall,
  1912  		clock.NewClock(),
  1913  	)
  1914  }
  1915  
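        // tlsRedirectHandler issues permanent redirects to the HTTPS external host
        // for matching GET and HEAD requests, and passes everything else through
        // to the base handler.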
  1916  type tlsRedirectHandler struct {
  1917  	matchHostname string
  1918  	externalHost  string
  1919  	baseHandler   http.Handler
  1920  }
  1921  
  1922  func (h tlsRedirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
  1923  	if strings.HasPrefix(r.Host, h.matchHostname) && (r.Method == "GET" || r.Method == "HEAD") {
  1924  		u := url.URL{
  1925  			Scheme:   "https",
  1926  			Host:     h.externalHost,
  1927  			Path:     r.URL.Path,
  1928  			RawQuery: r.URL.RawQuery,
  1929  		}
  1930  
  1931  		http.Redirect(w, r, u.String(), http.StatusMovedPermanently)
  1932  	} else {
  1933  		h.baseHandler.ServeHTTP(w, r)
  1934  	}
  1935  }
  1936  
  1937  func (cmd *RunCommand) isTLSEnabled() bool {
  1938  	return cmd.TLSBindPort != 0
  1939  }
  1940  
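        // drainRunner signals readiness immediately, then waits for a shutdown
        // signal and drains the wrapped component before exiting.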
  1941  type drainRunner struct {
  1942  	logger  lager.Logger
  1943  	drainer component.Drainable
  1944  }
  1945  
  1946  func (runner drainRunner) Run(signals <-chan os.Signal, ready chan<- struct{}) error {
  1947  	close(ready)
  1948  	<-signals
  1949  	runner.drainer.Drain(lagerctx.NewContext(context.Background(), runner.logger))
  1950  	return nil
  1951  }
  1952  
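        // RunnableComponent pairs a component's definition with the code that
        // runs it.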
  1953  type RunnableComponent struct {
  1954  	atc.Component
  1955  	component.Runnable
  1956  }