github.com/janma/nomad@v0.11.3/command/assets/example.nomad

# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".

# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name and one or many task groups,
# which are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
#     https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
  # The "region" parameter specifies the region in which to execute the job.
  # If omitted, this inherits the default region name of "global".
  # region = "global"
  #
  # The "datacenters" parameter specifies the list of datacenters which should
  # be considered when placing this task. This must be provided.
  datacenters = ["dc1"]

  # The "type" parameter controls the type of job, which impacts the scheduler's
  # decision on placement. This configuration is optional and defaults to
  # "service". For a full list of job types and their differences, please see
  # the online documentation.
  #
  # For more information, please see the online documentation at:
  #
  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
  #
  type = "service"
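
  # For illustration only (not part of the stock example): a batch workload
  # would set the type to "batch" instead, and "system" jobs run one
  # allocation on every eligible client.
  # type = "batch"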

  # The "constraint" stanza defines additional constraints for placing this job,
  # in addition to any resource or driver constraints. This stanza may be placed
  # at the "job", "group", or "task" level, and supports variable interpolation.
  #
  # For more information and examples on the "constraint" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/constraint.html
  #
  # constraint {
  #   attribute = "${attr.kernel.name}"
  #   value     = "linux"
  # }

  # The "update" stanza specifies the update strategy of task groups. The update
  # strategy is used to control things like rolling upgrades, canaries, and
  # blue/green deployments. If omitted, no update strategy is enforced. The
  # "update" stanza may be placed at the job or task group level. When placed at
  # the job level, it applies to all groups within the job. When placed at both
  # the job and group level, the stanzas are merged, with the group's taking
  # precedence.
  #
  # For more information and examples on the "update" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/update.html
  #
  update {
    # The "max_parallel" parameter specifies the maximum number of updates to
    # perform in parallel. In this case, this specifies to update a single task
    # at a time.
    max_parallel = 1

    # The "min_healthy_time" parameter specifies the minimum time the allocation
    # must be in the healthy state before it is marked as healthy and unblocks
    # further allocations from being updated.
    min_healthy_time = "10s"

    # The "healthy_deadline" parameter specifies the deadline by which the
    # allocation must be marked as healthy, after which the allocation is
    # automatically transitioned to unhealthy. Transitioning to unhealthy will
    # fail the deployment and potentially roll back the job if "auto_revert" is
    # set to true.
    healthy_deadline = "3m"

    # The "progress_deadline" parameter specifies the deadline by which an
    # allocation must be marked as healthy. The deadline begins when the first
    # allocation for the deployment is created and is reset whenever an
    # allocation that is part of the deployment transitions to a healthy state.
    # If no allocation transitions to the healthy state before the progress
    # deadline, the deployment is marked as failed.
    progress_deadline = "10m"

    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
    auto_revert = false

    # The "canary" parameter specifies that changes to the job that would result
    # in destructive updates should create the specified number of canaries
    # without stopping any previous allocations. Once the operator determines the
    # canaries are healthy, they can be promoted which unblocks a rolling update
    # of the remaining allocations at a rate of "max_parallel".
    #
    # Further, setting "canary" equal to the count of the task group allows
    # blue/green deployments. When the job is updated, a full set of the new
    # version is deployed and upon promotion the old version is stopped.
    canary = 0
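
    # As an illustration only (not part of the stock example): with this
    # group's count of 1, setting "canary = 1" would stage a full set of the
    # new version alongside the old one (a blue/green deployment) until the
    # deployment is promoted.
    # canary = 1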
  }

  # The migrate stanza specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
  #
  # For more information on the "migrate" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/migrate.html
  #
  migrate {
    # Specifies the number of task groups that can be migrated at the same
    # time. This number must be less than the total count for the group as
    # (count - max_parallel) will be left running during migrations.
    max_parallel = 1

    # Specifies the mechanism by which allocation health is determined. The
    # potential values are "checks" or "task_states".
    health_check = "checks"

    # Specifies the minimum time the allocation must be in the healthy state
    # before it is marked as healthy and unblocks further allocations from being
    # migrated. This is specified using a label suffix like "30s" or "15m".
    min_healthy_time = "10s"

    # Specifies the deadline by which the allocation must be marked as healthy,
    # after which the allocation is automatically transitioned to unhealthy. This
    # is specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
  }

  # The "group" stanza defines a series of tasks that should be co-located on
  # the same Nomad client. Any task within a group will be placed on the same
  # client.
  #
  # For more information and examples on the "group" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/group.html
  #
  group "cache" {
    # The "count" parameter specifies the number of instances of this task
    # group that should be running. This value must be non-negative and
    # defaults to 1.
    count = 1

    # The "restart" stanza configures a group's behavior on task failure. If
    # left unspecified, a default restart policy is used based on the job type.
    #
    # For more information and examples on the "restart" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/restart.html
    #
    restart {
      # The number of attempts to restart a failed task within the specified
      # interval.
      attempts = 2
      interval = "30m"

      # The "delay" parameter specifies the duration to wait before restarting
      # a task after it has failed.
      delay = "15s"

      # The "mode" parameter controls what happens when a task has restarted
      # "attempts" times within the interval. "delay" mode delays the next
      # restart until the next interval. "fail" mode does not restart the task
      # if "attempts" has been hit within the interval.
      mode = "fail"
    }

    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
    # instead of a hard disk requirement. Clients using this stanza should
    # not specify disk requirements in the resources stanza of the task. All
    # tasks in this group will share the same ephemeral disk.
    #
    # For more information and examples on the "ephemeral_disk" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
    #
    ephemeral_disk {
      # When sticky is true and the task group is updated, the scheduler
      # will prefer to place the updated allocation on the same node and
      # will migrate the data. This is useful for tasks that store data
      # that should persist across allocation updates.
      # sticky = true
      #
      # Setting migrate to true results in the allocation directory of a
      # sticky allocation being migrated.
      # migrate = true
      #
      # The "size" parameter specifies the size in MB of shared ephemeral disk
      # between tasks in the group.
      size = 300
    }

    # The "affinity" stanza enables operators to express placement preferences
    # based on node attributes or metadata.
    #
    # For more information and examples on the "affinity" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/affinity.html
    #
    # affinity {
    #   # attribute specifies the name of a node attribute or metadata
    #   attribute = "${node.datacenter}"
    #
    #   # value specifies the desired attribute value. In this example Nomad
    #   # will prefer placement in the "us-west1" datacenter.
    #   value = "us-west1"
    #
    #   # weight can be used to indicate relative preference
    #   # when the job has more than one affinity. It defaults to 50 if not set.
    #   weight = 100
    # }

    # The "spread" stanza allows operators to increase the failure tolerance of
    # their applications by specifying a node attribute that allocations
    # should be spread over.
    #
    # For more information and examples on the "spread" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/spread.html
    #
    # spread {
    #   # attribute specifies the name of a node attribute or metadata
    #   attribute = "${node.datacenter}"
    #
    #   # targets can be used to define desired percentages of allocations
    #   # for each targeted attribute value.
    #   target "us-east1" {
    #     percent = 60
    #   }
    #
    #   target "us-west1" {
    #     percent = 40
    #   }
    # }

    # The "task" stanza creates an individual unit of work, such as a Docker
    # container, web application, or batch processing.
    #
    # For more information and examples on the "task" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/task.html
    #
    task "redis" {
      # The "driver" parameter specifies the task driver that should be used to
      # run the task.
      driver = "docker"

      # The "config" stanza specifies the driver configuration, which is passed
      # directly to the driver to start the task. The details of configurations
      # are specific to each driver, so please see specific driver
      # documentation for more information.
      config {
        image = "redis:3.2"

        port_map {
          db = 6379
        }
      }

      # The "artifact" stanza instructs Nomad to download an artifact from a
      # remote source prior to starting the task. This provides a convenient
      # mechanism for downloading configuration files or data needed to run the
      # task. It is possible to specify the "artifact" stanza multiple times to
      # download multiple artifacts.
      #
      # For more information and examples on the "artifact" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/artifact.html
      #
      # artifact {
      #   source = "http://foo.com/artifact.tar.gz"
      #   options {
      #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
      #   }
      # }

      # The "logs" stanza instructs the Nomad client on how many log files and
      # the maximum size of those log files to retain. Logging is enabled by
      # default, but the "logs" stanza allows for finer-grained control over
      # the log rotation and storage configuration.
      #
      # For more information and examples on the "logs" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/logs.html
      #
      # logs {
      #   max_files     = 10
      #   max_file_size = 15
      # }

      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, network, cpu, and more.
      # This ensures the task will execute on a machine that contains enough
      # resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/resources.html
      #
      resources {
        cpu    = 500 # 500 MHz
        memory = 256 # 256 MB

        network {
          mbits = 10
          port "db" {}
        }
      }

      # The "service" stanza instructs Nomad to register this task as a service
      # in the service discovery engine, which is currently Consul. This will
      # make the service addressable after Nomad has placed it on a host and
      # port.
      #
      # For more information and examples on the "service" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/service.html
      #
      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"

        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      # The "template" stanza instructs Nomad to manage a template, such as
      # a configuration file or script. This template can optionally pull data
      # from Consul or Vault to populate runtime configuration data.
      #
      # For more information and examples on the "template" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/template.html
      #
      # template {
      #   data          = "---\nkey: {{ key \"service/my-key\" }}"
      #   destination   = "local/file.yml"
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # The "template" stanza can also be used to create environment variables
      # for tasks that prefer those to config files. The task will be restarted
      # when data pulled from Consul or Vault changes.
      #
      # template {
      #   data        = "KEY={{ key \"service/my-key\" }}"
      #   destination = "local/file.env"
      #   env         = true
      # }

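      # As a hedged illustration only (not part of the stock example): a
      # template can also read secrets from Vault using consul-template's
      # "with secret" helper. The path "secret/data/myapp" and the key
      # "password" below are hypothetical, and using Vault in a template
      # requires the "vault" stanza shown next.
      #
      # template {
      #   data        = "DB_PASS={{ with secret \"secret/data/myapp\" }}{{ .Data.data.password }}{{ end }}"
      #   destination = "secrets/app.env"
      #   env         = true
      # }
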
      # The "vault" stanza instructs the Nomad client to acquire a token from
      # a HashiCorp Vault server. The Nomad servers must be configured and
      # authorized to communicate with Vault. By default, Nomad will inject
      # the token into the job via an environment variable and make the token
      # available to the "template" stanza. The Nomad client handles the renewal
      # and revocation of the Vault token.
      #
      # For more information and examples on the "vault" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/vault.html
      #
      # vault {
      #   policies      = ["cdn", "frontend"]
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # The "kill_timeout" parameter controls the timeout between signalling a
      # task that it will be killed and killing the task. If not set, a default
      # is used.
      # kill_timeout = "20s"
    }
  }
}