github.com/nir0s/nomad@v0.8.7-rc1/command/job_init.go

package command

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/posener/complete"
)

const (
	// DefaultInitName is the default name we use when
	// initializing the example file
	DefaultInitName = "example.nomad"
)

// JobInitCommand generates a new job template that you can customize to your
// liking, like vagrant init
type JobInitCommand struct {
	Meta
}

func (c *JobInitCommand) Help() string {
	helpText := `
Usage: nomad job init
Alias: nomad init

  Creates an example job file that can be used as a starting
  point to customize further.

Init Options:

  -short
    If the short flag is set, a minimal jobspec without comments is emitted.
`
	return strings.TrimSpace(helpText)
}

func (c *JobInitCommand) Synopsis() string {
	return "Create an example job file"
}

func (c *JobInitCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
			"-short": complete.PredictNothing,
		})
}

func (c *JobInitCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}

func (c *JobInitCommand) Name() string { return "job init" }

func (c *JobInitCommand) Run(args []string) int {
	var short bool

	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.BoolVar(&short, "short", false, "")

	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Check for misuse
	if len(flags.Args()) != 0 {
		c.Ui.Error("This command takes no arguments")
		c.Ui.Error(commandErrorText(c))
		return 1
	}

	// Check if the file already exists
	_, err := os.Stat(DefaultInitName)
	if err != nil && !os.IsNotExist(err) {
		c.Ui.Error(fmt.Sprintf("Failed to stat '%s': %v", DefaultInitName, err))
		return 1
	}
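	// A nil error from Stat means the file already exists; refuse to
	// overwrite it so existing user changes are never clobbered.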
	if !os.IsNotExist(err) {
		c.Ui.Error(fmt.Sprintf("Job '%s' already exists", DefaultInitName))
		return 1
	}

	var jobSpec []byte

	if short {
		jobSpec = []byte(shortJob)
	} else {
		jobSpec = []byte(defaultJob)
	}

	// Write out the example
	err = ioutil.WriteFile(DefaultInitName, jobSpec, 0660)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to write '%s': %v", DefaultInitName, err))
		return 1
	}

	// Success
	c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
	return 0
}
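
// A minimal sketch (not part of the upstream file) of how Run can be
// exercised programmatically, e.g. from a test in this package. It assumes
// github.com/mitchellh/cli's MockUi, which satisfies the Ui interface held
// by Meta; the test name is illustrative only:
//
//	func TestJobInitCommand_Run(t *testing.T) {
//		ui := new(cli.MockUi)
//		cmd := &JobInitCommand{Meta: Meta{Ui: ui}}
//		if code := cmd.Run([]string{"-short"}); code != 0 {
//			t.Fatalf("expected exit code 0, got %d: %s", code, ui.ErrorWriter.String())
//		}
//		// On success, example.nomad is written to the current working directory.
//	}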

var shortJob = strings.TrimSpace(`
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    task "redis" {
      driver = "docker"

      config {
        image = "redis:3.2"
        port_map {
          db = 6379
        }
      }

      resources {
        cpu    = 500
        memory = 256
        network {
          mbits = 10
          port "db" {}
        }
      }

      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }
}
`)
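
// A hedged sketch (not in the upstream file) of validating the templates:
// assuming Nomad's own parser in github.com/hashicorp/nomad/jobspec exposes
// Parse(io.Reader), a test could assert the emitted spec is valid HCL:
//
//	if _, err := jobspec.Parse(strings.NewReader(shortJob)); err != nil {
//		t.Fatalf("shortJob failed to parse: %v", err)
//	}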

var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".

# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. A job has a globally unique name and one or many task groups,
# which are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
#     https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
  # The "region" parameter specifies the region in which to execute the job. If
  # omitted, this inherits the default region name of "global".
  # region = "global"

  # The "datacenters" parameter specifies the list of datacenters which should
  # be considered when placing this task. This must be provided.
  datacenters = ["dc1"]

  # The "type" parameter controls the type of job, which impacts the scheduler's
  # decision on placement. This configuration is optional and defaults to
  # "service". For a full list of job types and their differences, please see
  # the online documentation.
  #
  # For more information, please see the online documentation at:
  #
  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
  #
  type = "service"

  # The "constraint" stanza defines additional constraints for placing this job,
  # in addition to any resource or driver constraints. This stanza may be placed
  # at the "job", "group", or "task" level, and supports variable interpolation.
  #
  # For more information and examples on the "constraint" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/constraint.html
  #
  # constraint {
  #   attribute = "${attr.kernel.name}"
  #   value     = "linux"
  # }

  # The "update" stanza specifies the update strategy of task groups. The
  # update strategy is used to control things like rolling upgrades, canaries,
  # and blue/green deployments. If omitted, no update strategy is enforced. The
  # "update" stanza may be placed at the job or task group level. When placed
  # at the job level, it applies to all groups within the job. When placed at
  # both the job and group level, the stanzas are merged, with the group's
  # taking precedence.
  #
  # For more information and examples on the "update" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/update.html
  #
  update {
    # The "max_parallel" parameter specifies the maximum number of updates to
    # perform in parallel. In this case, a single task is updated at a time.
    max_parallel = 1

    # The "min_healthy_time" parameter specifies the minimum time the allocation
    # must be in the healthy state before it is marked as healthy and unblocks
    # further allocations from being updated.
    min_healthy_time = "10s"

    # The "healthy_deadline" parameter specifies the deadline by which the
    # allocation must be marked as healthy; after the deadline, the allocation
    # is automatically transitioned to unhealthy. Transitioning to unhealthy
    # will fail the deployment and potentially roll back the job if
    # "auto_revert" is set to true.
    healthy_deadline = "3m"

    # The "progress_deadline" parameter specifies the deadline by which an
    # allocation must be marked as healthy. The deadline begins when the first
    # allocation for the deployment is created and is reset whenever an
    # allocation that is part of the deployment transitions to a healthy state.
    # If no allocation transitions to the healthy state before the progress
    # deadline, the deployment is marked as failed.
    progress_deadline = "10m"

    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
    auto_revert = false

    # The "canary" parameter specifies that changes to the job that would result
    # in destructive updates should create the specified number of canaries
    # without stopping any previous allocations. Once the operator determines the
    # canaries are healthy, they can be promoted, which unblocks a rolling update
    # of the remaining allocations at a rate of "max_parallel".
    #
    # Further, setting "canary" equal to the count of the task group allows
    # blue/green deployments. When the job is updated, a full set of the new
    # version is deployed, and upon promotion the old version is stopped.
    canary = 0
  }

  # The "migrate" stanza specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
  #
  # For more information on the "migrate" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/migrate.html
  #
  migrate {
    # Specifies the number of task groups that can be migrated at the same
    # time. This number must be less than the total count for the group, as
    # (count - max_parallel) will be left running during migrations.
    max_parallel = 1

    # Specifies the mechanism by which an allocation's health is determined.
    # The potential values are "checks" or "task_states".
    health_check = "checks"

    # Specifies the minimum time the allocation must be in the healthy state
    # before it is marked as healthy and unblocks further allocations from being
    # migrated. This is specified using a label suffix like "30s" or "15m".
    min_healthy_time = "10s"

    # Specifies the deadline by which the allocation must be marked as healthy;
    # after the deadline, the allocation is automatically transitioned to
    # unhealthy. This is specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
  }

  # The "group" stanza defines a series of tasks that should be co-located on
  # the same Nomad client. Any task within a group will be placed on the same
  # client.
  #
  # For more information and examples on the "group" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/group.html
  #
  group "cache" {
    # The "count" parameter specifies the number of instances of this task
    # group that should be running. This value must be non-negative and
    # defaults to 1.
    count = 1

    # The "restart" stanza configures a group's behavior on task failure. If
    # left unspecified, a default restart policy is used based on the job type.
    #
    # For more information and examples on the "restart" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/restart.html
    #
    restart {
      # The number of attempts to run the job within the specified interval.
      attempts = 2
      interval = "30m"

      # The "delay" parameter specifies the duration to wait before restarting
      # a task after it has failed.
      delay = "15s"

      # The "mode" parameter controls what happens when a task has restarted
      # "attempts" times within the interval. "delay" mode delays the next
      # restart until the next interval. "fail" mode does not restart the task
      # if "attempts" has been hit within the interval.
      mode = "fail"
    }

    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
    # instead of a hard disk requirement. Clients using this stanza should
    # not specify disk requirements in the resources stanza of the task. All
    # tasks in this group will share the same ephemeral disk.
    #
    # For more information and examples on the "ephemeral_disk" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
    #
    ephemeral_disk {
      # When sticky is true and the task group is updated, the scheduler
      # will prefer to place the updated allocation on the same node and
      # will migrate the data. This is useful for tasks that store data
      # that should persist across allocation updates.
      # sticky = true
      #
      # Setting migrate to true causes the allocation directory of a sticky
      # allocation to be migrated.
      # migrate = true

      # The "size" parameter specifies the size in MB of the ephemeral disk
      # shared between tasks in the group.
      size = 300
    }

    # The "task" stanza creates an individual unit of work, such as a Docker
    # container, web application, or batch processing.
    #
    # For more information and examples on the "task" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/task.html
    #
    task "redis" {
      # The "driver" parameter specifies the task driver that should be used to
      # run the task.
      driver = "docker"

      # The "config" stanza specifies the driver configuration, which is passed
      # directly to the driver to start the task. The details of configurations
      # are specific to each driver, so please see specific driver
      # documentation for more information.
      config {
        image = "redis:3.2"
        port_map {
          db = 6379
        }
      }

      # The "artifact" stanza instructs Nomad to download an artifact from a
      # remote source prior to starting the task. This provides a convenient
      # mechanism for downloading configuration files or data needed to run the
      # task. It is possible to specify the "artifact" stanza multiple times to
      # download multiple artifacts.
      #
      # For more information and examples on the "artifact" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/artifact.html
      #
      # artifact {
      #   source = "http://foo.com/artifact.tar.gz"
      #   options {
      #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
      #   }
      # }

      # The "logs" stanza instructs the Nomad client on how many log files and
      # the maximum size of those log files to retain. Logging is enabled by
      # default, but the "logs" stanza allows for finer-grained control over
      # the log rotation and storage configuration.
      #
      # For more information and examples on the "logs" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/logs.html
      #
      # logs {
      #   max_files     = 10
      #   max_file_size = 15
      # }

      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, network, cpu, and more.
      # This ensures the task will execute on a machine that contains enough
      # resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/resources.html
      #
      resources {
        cpu    = 500 # 500 MHz
        memory = 256 # 256 MB
        network {
          mbits = 10
          port "db" {}
        }
      }

      # The "service" stanza instructs Nomad to register this task as a service
      # in the service discovery engine, which is currently Consul. This will
      # make the service addressable after Nomad has placed it on a host and
      # port.
      #
      # For more information and examples on the "service" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/service.html
      #
      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      # The "template" stanza instructs Nomad to manage a template, such as
      # a configuration file or script. This template can optionally pull data
      # from Consul or Vault to populate runtime configuration data.
      #
      # For more information and examples on the "template" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/template.html
      #
      # template {
      #   data          = "---\nkey: {{ key \"service/my-key\" }}"
      #   destination   = "local/file.yml"
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # The "template" stanza can also be used to create environment variables
      # for tasks that prefer those to config files. The task will be restarted
      # when data pulled from Consul or Vault changes.
      #
      # template {
      #   data        = "KEY={{ key \"service/my-key\" }}"
      #   destination = "local/file.env"
      #   env         = true
      # }

      # The "vault" stanza instructs the Nomad client to acquire a token from
      # a HashiCorp Vault server. The Nomad servers must be configured and
      # authorized to communicate with Vault. By default, Nomad will inject
      # the token into the job via an environment variable and make the token
      # available to the "template" stanza. The Nomad client handles the renewal
      # and revocation of the Vault token.
      #
      # For more information and examples on the "vault" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/vault.html
      #
      # vault {
      #   policies      = ["cdn", "frontend"]
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # Controls the timeout between signalling a task that it will be killed
      # and killing it. If not set, a default is used.
      # kill_timeout = "20s"
    }
  }
}
`)
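
// A usage note (not part of the upstream file): the intended workflow is to
// run "nomad job init" (or "nomad job init -short") to produce example.nomad
// in the current directory, edit the template to suit the workload, and then
// submit it with "nomad job run example.nomad".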