github.com/ferranbt/nomad@v0.9.3-0.20190607002617-85c449b7667c/command/job_init.go

package command

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/posener/complete"
)

const (
	// DefaultInitName is the default name we use when
	// initializing the example file
	DefaultInitName = "example.nomad"
)

// JobInitCommand generates a new job template that you can customize to your
// liking, like vagrant init
type JobInitCommand struct {
	Meta
}

func (c *JobInitCommand) Help() string {
	helpText := `
Usage: nomad job init
Alias: nomad init

  Creates an example job file that can be used as a starting
  point to customize further.

Init Options:

  -short
    If the short flag is set, a minimal jobspec without comments is emitted.
`
	return strings.TrimSpace(helpText)
}

func (c *JobInitCommand) Synopsis() string {
	return "Create an example job file"
}

func (c *JobInitCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
			"-short": complete.PredictNothing,
		})
}

func (c *JobInitCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}

func (c *JobInitCommand) Name() string { return "job init" }

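// Run parses the command-line flags and writes the example job file.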
func (c *JobInitCommand) Run(args []string) int {
	var short bool

	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.BoolVar(&short, "short", false, "")

	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Check for misuse
	if len(flags.Args()) != 0 {
		c.Ui.Error("This command takes no arguments")
		c.Ui.Error(commandErrorText(c))
		return 1
	}

	// Check if the file already exists
	_, err := os.Stat(DefaultInitName)
	if err != nil && !os.IsNotExist(err) {
		c.Ui.Error(fmt.Sprintf("Failed to stat '%s': %v", DefaultInitName, err))
		return 1
	}
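	// At this point err is either nil (the file already exists) or an
	// os.IsNotExist error (the file does not exist yet); any other stat
	// failure was handled above.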
	if !os.IsNotExist(err) {
		c.Ui.Error(fmt.Sprintf("Job '%s' already exists", DefaultInitName))
		return 1
	}

	var jobSpec []byte

	if short {
		jobSpec = []byte(shortJob)
	} else {
		jobSpec = []byte(defaultJob)
	}

	// Write out the example
	err = ioutil.WriteFile(DefaultInitName, jobSpec, 0660)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to write '%s': %v", DefaultInitName, err))
		return 1
	}

	// Success
	c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
	return 0
}

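// Example usage (illustrative; both commands write example.nomad to the
// current directory, using the flags defined in Run above):
//
//	$ nomad job init          # fully commented example job file
//	$ nomad job init -short   # minimal jobspec without comments

// shortJob is the minimal example jobspec written when the -short flag is set.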
var shortJob = strings.TrimSpace(`
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    task "redis" {
      driver = "docker"

      config {
        image = "redis:3.2"
        port_map {
          db = 6379
        }
      }

      resources {
        cpu    = 500
        memory = 256
        network {
          mbits = 10
          port "db" {}
        }
      }

      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }
}
`)

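// defaultJob is the fully commented example jobspec written by default.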
var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".

# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name and one or many task groups,
# which are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
#     https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
  # The "region" parameter specifies the region in which to execute the job. If
  # omitted, this inherits the default region name of "global".
  # region = "global"

  # The "datacenters" parameter specifies the list of datacenters which should
  # be considered when placing this task. This must be provided.
  datacenters = ["dc1"]

  # The "type" parameter controls the type of job, which impacts the scheduler's
  # decision on placement. This configuration is optional and defaults to
  # "service". For a full list of job types and their differences, please see
  # the online documentation.
  #
  # For more information, please see the online documentation at:
  #
  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
  #
  type = "service"

  # The "constraint" stanza defines additional constraints for placing this job,
  # in addition to any resource or driver constraints. This stanza may be placed
  # at the "job", "group", or "task" level, and supports variable interpolation.
  #
  # For more information and examples on the "constraint" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/constraint.html
  #
  # constraint {
  #   attribute = "${attr.kernel.name}"
  #   value     = "linux"
  # }

  # The "update" stanza specifies the update strategy of task groups. The update
  # strategy is used to control things like rolling upgrades, canaries, and
  # blue/green deployments. If omitted, no update strategy is enforced. The
  # "update" stanza may be placed at the job or task group level. When placed at
  # the job level, it applies to all groups within the job. When placed at both
  # the job and group level, the stanzas are merged with the group's taking
  # precedence.
  #
  # For more information and examples on the "update" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/update.html
  #
  update {
    # The "max_parallel" parameter specifies the maximum number of updates to
    # perform in parallel. In this case, this specifies to update a single task
    # at a time.
    max_parallel = 1

    # The "min_healthy_time" parameter specifies the minimum time the allocation
    # must be in the healthy state before it is marked as healthy and unblocks
    # further allocations from being updated.
    min_healthy_time = "10s"

    # The "healthy_deadline" parameter specifies the deadline by which the
    # allocation must be marked as healthy, after which it is automatically
    # transitioned to unhealthy. Transitioning to unhealthy will fail the
    # deployment and potentially roll back the job if "auto_revert" is set to
    # true.
    healthy_deadline = "3m"

    # The "progress_deadline" parameter specifies the deadline by which an
    # allocation must be marked as healthy. The deadline begins when the first
    # allocation for the deployment is created and is reset whenever an
    # allocation that is part of the deployment transitions to a healthy state.
    # If no allocation transitions to the healthy state before the progress
    # deadline, the deployment is marked as failed.
    progress_deadline = "10m"

    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
    auto_revert = false

    # The "canary" parameter specifies that changes to the job that would result
    # in destructive updates should create the specified number of canaries
    # without stopping any previous allocations. Once the operator determines the
    # canaries are healthy, they can be promoted which unblocks a rolling update
    # of the remaining allocations at a rate of "max_parallel".
    #
    # Further, setting "canary" equal to the count of the task group allows
    # blue/green deployments. When the job is updated, a full set of the new
    # version is deployed and upon promotion the old version is stopped.
    canary = 0
  }

  # The migrate stanza specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
  #
  # For more information on the "migrate" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/migrate.html
  #
  migrate {
    # Specifies the number of task groups that can be migrated at the same
    # time. This number must be less than the total count for the group as
    # (count - max_parallel) will be left running during migrations.
    max_parallel = 1

    # Specifies the mechanism by which allocation health is determined. The
    # potential values are "checks" or "task_states".
    health_check = "checks"

    # Specifies the minimum time the allocation must be in the healthy state
    # before it is marked as healthy and unblocks further allocations from being
    # migrated. This is specified using a label suffix like "30s" or "15m".
    min_healthy_time = "10s"

    # Specifies the deadline by which the allocation must be marked as healthy,
    # after which it is automatically transitioned to unhealthy. This is
    # specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
  }

  # The "group" stanza defines a series of tasks that should be co-located on
  # the same Nomad client. Any task within a group will be placed on the same
  # client.
  #
  # For more information and examples on the "group" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/group.html
  #
  group "cache" {
    # The "count" parameter specifies the number of instances of this task
    # group that should be running. This value must be non-negative and
    # defaults to 1.
    count = 1

    # The "restart" stanza configures a group's behavior on task failure. If
    # left unspecified, a default restart policy is used based on the job type.
    #
    # For more information and examples on the "restart" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/restart.html
    #
    restart {
      # The number of attempts to run the job within the specified interval.
      attempts = 2
      interval = "30m"

      # The "delay" parameter specifies the duration to wait before restarting
      # a task after it has failed.
      delay = "15s"

      # The "mode" parameter controls what happens when a task has restarted
      # "attempts" times within the interval. "delay" mode delays the next
      # restart until the next interval. "fail" mode does not restart the task
      # if "attempts" has been hit within the interval.
      mode = "fail"
    }

    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
    # instead of a hard disk requirement. Clients using this stanza should
    # not specify disk requirements in the resources stanza of the task. All
    # tasks in this group will share the same ephemeral disk.
    #
    # For more information and examples on the "ephemeral_disk" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
    #
    ephemeral_disk {
      # When sticky is true and the task group is updated, the scheduler
      # will prefer to place the updated allocation on the same node and
      # will migrate the data. This is useful for tasks that store data
      # that should persist across allocation updates.
      # sticky = true
      #
      # Setting migrate to true causes the allocation directory of a sticky
      # allocation to be migrated.
      # migrate = true

      # The "size" parameter specifies the size in MB of shared ephemeral disk
      # between tasks in the group.
      size = 300
    }


    # The "affinity" stanza enables operators to express placement preferences
    # based on node attributes or metadata.
    #
    # For more information and examples on the "affinity" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/affinity.html
    #
    # affinity {
    #   attribute specifies the name of a node attribute or metadata
    #   attribute = "${node.datacenter}"

    #   value specifies the desired attribute value. In this example Nomad
    #   will prefer placement in the "us-west1" datacenter.
    #   value  = "us-west1"

    #   weight can be used to indicate relative preference
    #   when the job has more than one affinity. It defaults to 50 if not set.
    #   weight = 100
    # }

    # The "spread" stanza allows operators to increase the failure tolerance of
    # their applications by specifying a node attribute that allocations
    # should be spread over.
    #
    # For more information and examples on the "spread" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/spread.html
    #
    # spread {
    #   attribute specifies the name of a node attribute or metadata
    #   attribute = "${node.datacenter}"

    #   targets can be used to define desired percentages of allocations
    #   for each targeted attribute value.
    #
    #     target "us-east1" {
    #       percent = 60
    #     }
    #     target "us-west1" {
    #       percent = 40
    #     }
    # }

    # The "task" stanza creates an individual unit of work, such as a Docker
    # container, web application, or batch processing.
    #
    # For more information and examples on the "task" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/task.html
    #
    task "redis" {
      # The "driver" parameter specifies the task driver that should be used to
      # run the task.
      driver = "docker"

      # The "config" stanza specifies the driver configuration, which is passed
      # directly to the driver to start the task. The details of configurations
      # are specific to each driver, so please see specific driver
      # documentation for more information.
      config {
        image = "redis:3.2"
        port_map {
          db = 6379
        }
      }

      # The "artifact" stanza instructs Nomad to download an artifact from a
      # remote source prior to starting the task. This provides a convenient
      # mechanism for downloading configuration files or data needed to run the
      # task. It is possible to specify the "artifact" stanza multiple times to
      # download multiple artifacts.
      #
      # For more information and examples on the "artifact" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/artifact.html
      #
      # artifact {
      #   source = "http://foo.com/artifact.tar.gz"
      #   options {
      #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
      #   }
      # }

      # The "logs" stanza instructs the Nomad client on how many log files and
      # the maximum size of those log files to retain. Logging is enabled by
      # default, but the "logs" stanza allows for finer-grained control over
      # the log rotation and storage configuration.
      #
      # For more information and examples on the "logs" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/logs.html
      #
      # logs {
      #   max_files     = 10
      #   max_file_size = 15
      # }

      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, network, cpu, and more.
      # This ensures the task will execute on a machine that contains enough
      # resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/resources.html
      #
      resources {
        cpu    = 500 # 500 MHz
        memory = 256 # 256 MB
        network {
          mbits = 10
          port "db" {}
        }
      }

      # The "service" stanza instructs Nomad to register this task as a service
      # in the service discovery engine, which is currently Consul. This will
      # make the service addressable after Nomad has placed it on a host and
      # port.
      #
      # For more information and examples on the "service" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/service.html
      #
      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      # The "template" stanza instructs Nomad to manage a template, such as
      # a configuration file or script. This template can optionally pull data
      # from Consul or Vault to populate runtime configuration data.
      #
      # For more information and examples on the "template" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/template.html
      #
      # template {
      #   data          = "---\nkey: {{ key \"service/my-key\" }}"
      #   destination   = "local/file.yml"
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # The "template" stanza can also be used to create environment variables
      # for tasks that prefer those to config files. The task will be restarted
      # when data pulled from Consul or Vault changes.
      #
      # template {
      #   data        = "KEY={{ key \"service/my-key\" }}"
      #   destination = "local/file.env"
      #   env         = true
      # }

      # The "vault" stanza instructs the Nomad client to acquire a token from
      # a HashiCorp Vault server. The Nomad servers must be configured and
      # authorized to communicate with Vault. By default, Nomad will inject
      # the token into the job via an environment variable and make the token
      # available to the "template" stanza. The Nomad client handles the renewal
      # and revocation of the Vault token.
      #
      # For more information and examples on the "vault" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/vault.html
      #
      # vault {
      #   policies      = ["cdn", "frontend"]
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # Controls the timeout between signalling a task that it will be killed
      # and killing the task. If not set, a default is used.
      # kill_timeout = "20s"
    }
  }
}
`)