github.com/emate/nomad@v0.8.2-wo-binpacking/command/job_init.go (about)

     1  package command
     2  
     3  import (
     4  	"fmt"
     5  	"io/ioutil"
     6  	"os"
     7  	"strings"
     8  )
     9  
const (
	// DefaultInitName is the default file name we use when
	// initializing the example job file in the current directory.
	DefaultInitName = "example.nomad"
)
    15  
// JobInitCommand generates a new job template that you can customize to your
// liking, like vagrant init
type JobInitCommand struct {
	// Meta embeds the shared command helpers; it supplies the Ui
	// that Run uses for all output and error reporting.
	Meta
}
    21  
    22  func (c *JobInitCommand) Help() string {
    23  	helpText := `
    24  Usage: nomad job init
    25  Alias: nomad init
    26  
    27    Creates an example job file that can be used as a starting
    28    point to customize further.
    29  `
    30  	return strings.TrimSpace(helpText)
    31  }
    32  
    33  func (c *JobInitCommand) Synopsis() string {
    34  	return "Create an example job file"
    35  }
    36  
    37  func (c *JobInitCommand) Name() string { return "job init" }
    38  
    39  func (c *JobInitCommand) Run(args []string) int {
    40  	// Check for misuse
    41  	if len(args) != 0 {
    42  		c.Ui.Error("This command takes no arguments")
    43  		c.Ui.Error(commandErrorText(c))
    44  		return 1
    45  	}
    46  
    47  	// Check if the file already exists
    48  	_, err := os.Stat(DefaultInitName)
    49  	if err != nil && !os.IsNotExist(err) {
    50  		c.Ui.Error(fmt.Sprintf("Failed to stat '%s': %v", DefaultInitName, err))
    51  		return 1
    52  	}
    53  	if !os.IsNotExist(err) {
    54  		c.Ui.Error(fmt.Sprintf("Job '%s' already exists", DefaultInitName))
    55  		return 1
    56  	}
    57  
    58  	// Write out the example
    59  	err = ioutil.WriteFile(DefaultInitName, []byte(defaultJob), 0660)
    60  	if err != nil {
    61  		c.Ui.Error(fmt.Sprintf("Failed to write '%s': %v", DefaultInitName, err))
    62  		return 1
    63  	}
    64  
    65  	// Success
    66  	c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
    67  	return 0
    68  }
    69  
// defaultJob is the example job specification that Run writes to
// DefaultInitName. Its contents are emitted verbatim (after trimming
// surrounding whitespace), so the text below is user-facing output.
var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".

# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
#     https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
  # The "region" parameter specifies the region in which to execute the job. If
  # omitted, this inherits the default region name of "global".
  # region = "global"

  # The "datacenters" parameter specifies the list of datacenters which should
  # be considered when placing this task. This must be provided.
  datacenters = ["dc1"]

  # The "type" parameter controls the type of job, which impacts the scheduler's
  # decision on placement. This configuration is optional and defaults to
  # "service". For a full list of job types and their differences, please see
  # the online documentation.
  #
  # For more information, please see the online documentation at:
  #
  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
  #
  type = "service"

  # The "constraint" stanza defines additional constraints for placing this job,
  # in addition to any resource or driver constraints. This stanza may be placed
  # at the "job", "group", or "task" level, and supports variable interpolation.
  #
  # For more information and examples on the "constraint" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/constraint.html
  #
  # constraint {
  #   attribute = "${attr.kernel.name}"
  #   value     = "linux"
  # }

  # The "update" stanza specifies the update strategy of task groups. The update
  # strategy is used to control things like rolling upgrades, canaries, and
  # blue/green deployments. If omitted, no update strategy is enforced. The
  # "update" stanza may be placed at the job or task group. When placed at the
  # job, it applies to all groups within the job. When placed at both the job and
  # group level, the stanzas are merged with the group's taking precedence.
  #
  # For more information and examples on the "update" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/update.html
  #
  update {
    # The "max_parallel" parameter specifies the maximum number of updates to
    # perform in parallel. In this case, this specifies to update a single task
    # at a time.
    max_parallel = 1

    # The "min_healthy_time" parameter specifies the minimum time the allocation
    # must be in the healthy state before it is marked as healthy and unblocks
    # further allocations from being updated.
    min_healthy_time = "10s"

    # The "healthy_deadline" parameter specifies the deadline in which the
    # allocation must be marked as healthy after which the allocation is
    # automatically transitioned to unhealthy. Transitioning to unhealthy will
    # fail the deployment and potentially roll back the job if "auto_revert" is
    # set to true.
    healthy_deadline = "3m"

    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
    auto_revert = false

    # The "canary" parameter specifies that changes to the job that would result
    # in destructive updates should create the specified number of canaries
    # without stopping any previous allocations. Once the operator determines the
    # canaries are healthy, they can be promoted which unblocks a rolling update
    # of the remaining allocations at a rate of "max_parallel".
    #
    # Further, setting "canary" equal to the count of the task group allows
    # blue/green deployments. When the job is updated, a full set of the new
    # version is deployed and upon promotion the old version is stopped.
    canary = 0
  }

  # The migrate stanza specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
  #
  # For more information on the "migrate" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/migrate.html
  #
  migrate {
    # Specifies the number of task groups that can be migrated at the same
    # time. This number must be less than the total count for the group as
    # (count - max_parallel) will be left running during migrations.
    max_parallel = 1

    # Specifies the mechanism in which allocations health is determined. The
    # potential values are "checks" or "task_states".
    health_check = "checks"

    # Specifies the minimum time the allocation must be in the healthy state
    # before it is marked as healthy and unblocks further allocations from being
    # migrated. This is specified using a label suffix like "30s" or "15m".
    min_healthy_time = "10s"

    # Specifies the deadline in which the allocation must be marked as healthy
    # after which the allocation is automatically transitioned to unhealthy. This
    # is specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
  }

  # The "group" stanza defines a series of tasks that should be co-located on
  # the same Nomad client. Any task within a group will be placed on the same
  # client.
  #
  # For more information and examples on the "group" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/group.html
  #
  group "cache" {
    # The "count" parameter specifies the number of the task groups that should
    # be running under this group. This value must be non-negative and defaults
    # to 1.
    count = 1

    # The "restart" stanza configures a group's behavior on task failure. If
    # left unspecified, a default restart policy is used based on the job type.
    #
    # For more information and examples on the "restart" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/restart.html
    #
    restart {
      # The number of attempts to run the job within the specified interval.
      attempts = 2
      interval = "30m"

      # The "delay" parameter specifies the duration to wait before restarting
      # a task after it has failed.
      delay = "15s"

     # The "mode" parameter controls what happens when a task has restarted
     # "attempts" times within the interval. "delay" mode delays the next
     # restart until the next interval. "fail" mode does not restart the task
     # if "attempts" has been hit within the interval.
      mode = "fail"
    }

    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
    # instead of a hard disk requirement. Clients using this stanza should
    # not specify disk requirements in the resources stanza of the task. All
    # tasks in this group will share the same ephemeral disk.
    #
    # For more information and examples on the "ephemeral_disk" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
    #
    ephemeral_disk {
      # When sticky is true and the task group is updated, the scheduler
      # will prefer to place the updated allocation on the same node and
      # will migrate the data. This is useful for tasks that store data
      # that should persist across allocation updates.
      # sticky = true
      #
      # Setting migrate to true results in the allocation directory of a
      # sticky allocation directory to be migrated.
      # migrate = true

      # The "size" parameter specifies the size in MB of shared ephemeral disk
      # between tasks in the group.
      size = 300
    }

    # The "task" stanza creates an individual unit of work, such as a Docker
    # container, web application, or batch processing.
    #
    # For more information and examples on the "task" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/task.html
    #
    task "redis" {
      # The "driver" parameter specifies the task driver that should be used to
      # run the task.
      driver = "docker"

      # The "config" stanza specifies the driver configuration, which is passed
      # directly to the driver to start the task. The details of configurations
      # are specific to each driver, so please see specific driver
      # documentation for more information.
      config {
        image = "redis:3.2"
        port_map {
          db = 6379
        }
      }

      # The "artifact" stanza instructs Nomad to download an artifact from a
      # remote source prior to starting the task. This provides a convenient
      # mechanism for downloading configuration files or data needed to run the
      # task. It is possible to specify the "artifact" stanza multiple times to
      # download multiple artifacts.
      #
      # For more information and examples on the "artifact" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/artifact.html
      #
      # artifact {
      #   source = "http://foo.com/artifact.tar.gz"
      #   options {
      #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
      #   }
      # }

      # The "logs" stanza instructs the Nomad client on how many log files and
      # the maximum size of those logs files to retain. Logging is enabled by
      # default, but the "logs" stanza allows for finer-grained control over
      # the log rotation and storage configuration.
      #
      # For more information and examples on the "logs" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/logs.html
      #
      # logs {
      #   max_files     = 10
      #   max_file_size = 15
      # }

      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, network, cpu, and more.
      # This ensures the task will execute on a machine that contains enough
      # resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/resources.html
      #
      resources {
        cpu    = 500 # 500 MHz
        memory = 256 # 256MB
        network {
          mbits = 10
          port "db" {}
        }
      }

      # The "service" stanza instructs Nomad to register this task as a service
      # in the service discovery engine, which is currently Consul. This will
      # make the service addressable after Nomad has placed it on a host and
      # port.
      #
      # For more information and examples on the "service" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/service.html
      #
      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      # The "template" stanza instructs Nomad to manage a template, such as
      # a configuration file or script. This template can optionally pull data
      # from Consul or Vault to populate runtime configuration data.
      #
      # For more information and examples on the "template" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/template.html
      #
      # template {
      #   data          = "---\nkey: {{ key \"service/my-key\" }}"
      #   destination   = "local/file.yml"
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # The "template" stanza can also be used to create environment variables
      # for tasks that prefer those to config files. The task will be restarted
      # when data pulled from Consul or Vault changes.
      #
      # template {
      #   data        = "KEY={{ key \"service/my-key\" }}"
      #   destination = "local/file.env"
      #   env         = true
      # }

      # The "vault" stanza instructs the Nomad client to acquire a token from
      # a HashiCorp Vault server. The Nomad servers must be configured and
      # authorized to communicate with Vault. By default, Nomad will inject
      # The token into the job via an environment variable and make the token
      # available to the "template" stanza. The Nomad client handles the renewal
      # and revocation of the Vault token.
      #
      # For more information and examples on the "vault" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/vault.html
      #
      # vault {
      #   policies      = ["cdn", "frontend"]
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # Controls the timeout between signalling a task it will be killed
      # and killing the task. If not set a default is used.
      # kill_timeout = "20s"
    }
  }
}
`)