github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/command/assets/example.nomad (about)

     1  # There can only be a single job definition per file. This job is named
     2  # "example" so it will create a job with the ID and Name "example".
     3  
     4  # The "job" stanza is the top-most configuration option in the job
     5  # specification. A job is a declarative specification of tasks that Nomad
     6  # should run. Jobs have a globally unique name, one or many task groups, which
     7  # are themselves collections of one or many tasks.
     8  #
     9  # For more information and examples on the "job" stanza, please see
    10  # the online documentation at:
    11  #
    12  #     https://www.nomadproject.io/docs/job-specification/job
    13  #
    14  job "example" {
    15    # The "region" parameter specifies the region in which to execute the job.
    16    # If omitted, this inherits the default region name of "global".
    17    # region = "global"
    18    #
    19    # The "datacenters" parameter specifies the list of datacenters which should
    20    # be considered when placing this task. This must be provided.
    21    datacenters = ["dc1"]
    22  
    23    # The "type" parameter controls the type of job, which impacts the scheduler's
    24    # decision on placement. This configuration is optional and defaults to
    25    # "service". For a full list of job types and their differences, please see
    26    # the online documentation.
    27    #
    28    # For more information, please see the online documentation at:
    29    #
    30    #     https://www.nomadproject.io/docs/schedulers
    31    #
    32    type = "service"
    33  
    34    # The "constraint" stanza defines additional constraints for placing this job,
    35    # in addition to any resource or driver constraints. This stanza may be placed
    36    # at the "job", "group", or "task" level, and supports variable interpolation.
    37    #
    38    # For more information and examples on the "constraint" stanza, please see
    39    # the online documentation at:
    40    #
    41    #     https://www.nomadproject.io/docs/job-specification/constraint
    42    #
    43    # constraint {
    44    #   attribute = "${attr.kernel.name}"
    45    #   value     = "linux"
    46    # }
    47  
    48    # The "update" stanza specifies the update strategy of task groups. The update
    49    # strategy is used to control things like rolling upgrades, canaries, and
    50    # blue/green deployments. If omitted, no update strategy is enforced. The
    51    # "update" stanza may be placed at the job or task group. When placed at the
    52    # job, it applies to all groups within the job. When placed at both the job and
    53    # group level, the stanzas are merged with the group's taking precedence.
    54    #
    55    # For more information and examples on the "update" stanza, please see
    56    # the online documentation at:
    57    #
    58    #     https://www.nomadproject.io/docs/job-specification/update
    59    #
    60    update {
    61      # The "max_parallel" parameter specifies the maximum number of updates to
    62      # perform in parallel. In this case, this specifies to update a single task
    63      # at a time.
    64      max_parallel = 1
    65  
    66      # The "min_healthy_time" parameter specifies the minimum time the allocation
    67      # must be in the healthy state before it is marked as healthy and unblocks
    68      # further allocations from being updated.
    69      min_healthy_time = "10s"
    70  
    71      # The "healthy_deadline" parameter specifies the deadline in which the
    72      # allocation must be marked as healthy after which the allocation is
    73      # automatically transitioned to unhealthy. Transitioning to unhealthy will
    74      # fail the deployment and potentially roll back the job if "auto_revert" is
    75      # set to true.
    76      healthy_deadline = "3m"
    77  
    78      # The "progress_deadline" parameter specifies the deadline in which an
    79      # allocation must be marked as healthy. The deadline begins when the first
    80      # allocation for the deployment is created and is reset whenever an allocation
    81      # as part of the deployment transitions to a healthy state. If no allocation
    82      # transitions to the healthy state before the progress deadline, the
    83      # deployment is marked as failed.
    84      progress_deadline = "10m"
    85  
    86      # The "auto_revert" parameter specifies if the job should auto-revert to the
    87      # last stable job on deployment failure. A job is marked as stable if all the
    88      # allocations as part of its deployment were marked healthy.
    89      auto_revert = false
    90  
    91      # The "canary" parameter specifies that changes to the job that would result
    92      # in destructive updates should create the specified number of canaries
    93      # without stopping any previous allocations. Once the operator determines the
    94      # canaries are healthy, they can be promoted which unblocks a rolling update
    95      # of the remaining allocations at a rate of "max_parallel".
    96      #
    97      # Further, setting "canary" equal to the count of the task group allows
    98      # blue/green deployments. When the job is updated, a full set of the new
    99      # version is deployed and upon promotion the old version is stopped.
   100      canary = 0 # a value of 0 disables canary deployments
   101    }
   102    # The migrate stanza specifies the group's strategy for migrating off of
   103    # draining nodes. If omitted, a default migration strategy is applied.
   104    #
   105    # For more information on the "migrate" stanza, please see
   106    # the online documentation at:
   107    #
   108    #     https://www.nomadproject.io/docs/job-specification/migrate
   109    #
   110    migrate {
   111      # Specifies the number of allocations that can be migrated at the same
   112      # time. This number must be less than the total count for the group as
   113      # (count - max_parallel) will be left running during migrations.
   114      max_parallel = 1
   115  
   116      # Specifies the mechanism by which allocation health is determined. The
   117      # potential values are "checks" or "task_states".
   118      health_check = "checks"
   119  
   120      # Specifies the minimum time the allocation must be in the healthy state
   121      # before it is marked as healthy and unblocks further allocations from being
   122      # migrated. This is specified using a label suffix like "30s" or "15m".
   123      min_healthy_time = "10s"
   124  
   125      # Specifies the deadline in which the allocation must be marked as healthy
   126      # after which the allocation is automatically transitioned to unhealthy. This
   127      # is specified using a label suffix like "2m" or "1h".
   128      healthy_deadline = "5m"
   129    }
   130    # The "group" stanza defines a series of tasks that should be co-located on
   131    # the same Nomad client. Any task within a group will be placed on the same
   132    # client.
   133    #
   134    # For more information and examples on the "group" stanza, please see
   135    # the online documentation at:
   136    #
   137    #     https://www.nomadproject.io/docs/job-specification/group
   138    #
   139    group "cache" {
   140      # The "count" parameter specifies the number of the task groups that should
   141      # be running under this group. This value must be non-negative and defaults
   142      # to 1.
   143      count = 1
   144  
   145      # The "network" stanza specifies the network configuration for the allocation
   146      # including requesting port bindings.
   147      #
   148      # For more information and examples on the "network" stanza, please see
   149      # the online documentation at:
   150      #
   151      #     https://www.nomadproject.io/docs/job-specification/network
   152      #
   153      network {
   154        # Request a dynamically-assigned host port, labelled "db", that is
   155        # forwarded to port 6379 inside the task (the Redis default port).
   156        port "db" {
   157          to = 6379
   158        }
   159      }
   158  
   159      # The "service" stanza instructs Nomad to register this task as a service
   160      # in the service discovery engine, which is currently Consul. This will
   161      # make the service addressable after Nomad has placed it on a host and
   162      # port.
   163      #
   164      # For more information and examples on the "service" stanza, please see
   165      # the online documentation at:
   166      #
   167      #     https://www.nomadproject.io/docs/job-specification/service
   168      #
   169      service {
   170        name = "redis-cache"
   171        tags = ["global", "cache"]
   172        port = "db" # references the port labelled "db" in the network stanza above
   173  
   174        # The "check" stanza instructs Nomad to create a Consul health check for
   175        # this service. A sample check is provided here for your convenience;
   176        # uncomment it to enable it. The "check" stanza is documented in the
   177        # "service" stanza documentation.
   178  
   179        # check {
   180        #   name     = "alive"
   181        #   type     = "tcp"
   182        #   interval = "10s"
   183        #   timeout  = "2s"
   184        # }
   185  
   186      }
   187  
   188      # The "restart" stanza configures a group's behavior on task failure. If
   189      # left unspecified, a default restart policy is used based on the job type.
   190      #
   191      # For more information and examples on the "restart" stanza, please see
   192      # the online documentation at:
   193      #
   194      #     https://www.nomadproject.io/docs/job-specification/restart
   195      #
   196      restart {
   197        # The number of attempts to run the job within the specified interval.
   198        attempts = 2
   199        interval = "30m" # the window over which "attempts" is counted
   200  
   201        # The "delay" parameter specifies the duration to wait before restarting
   202        # a task after it has failed.
   203        delay = "15s"
   204  
   205        # The "mode" parameter controls what happens when a task has restarted
   206        # "attempts" times within the interval. "delay" mode delays the next
   207        # restart until the next interval. "fail" mode does not restart the task
   208        # if "attempts" has been hit within the interval.
   209        mode = "fail"
   210      }
   211  
   212      # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
   213      # instead of a hard disk requirement. Clients using this stanza should
   214      # not specify disk requirements in the resources stanza of the task. All
   215      # tasks in this group will share the same ephemeral disk.
   216      #
   217      # For more information and examples on the "ephemeral_disk" stanza, please
   218      # see the online documentation at:
   219      #
   220      #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk
   221      #
   222      ephemeral_disk {
   223        # When sticky is true and the task group is updated, the scheduler
   224        # will prefer to place the updated allocation on the same node and
   225        # will migrate the data. This is useful for tasks that store data
   226        # that should persist across allocation updates.
   227        # sticky = true
   228        #
   229        # Setting migrate to true causes the allocation directory of a sticky
   230        # allocation to be migrated along with the allocation.
   231        # migrate = true
   232        #
   233        # The "size" parameter specifies the size in MB of shared ephemeral disk
   234        # between tasks in the group.
   235        size = 300
   236      }
   237  
   238      # The "affinity" stanza enables operators to express placement preferences
   239      # based on node attributes or metadata.
   240      #
   241      # For more information and examples on the "affinity" stanza, please
   242      # see the online documentation at:
   243      #
   244      #     https://www.nomadproject.io/docs/job-specification/affinity
   245      #
   246      # affinity {
   247      # attribute specifies the name of a node attribute or metadata
   248      # attribute = "${node.datacenter}"
   249  
   250  
   251      # value specifies the desired attribute value. In this example Nomad
   252      # will prefer placement in the "us-west1" datacenter.
   253      # value  = "us-west1"
   254  
   255  
   256      # weight can be used to indicate relative preference
   257      # when the job has more than one affinity. It defaults to 50 if not set.
   258      # weight = 100
   259      #  }
   260  
   261  
   262      # The "spread" stanza allows operators to increase the failure tolerance of
   263      # their applications by specifying a node attribute that allocations
   264      # should be spread over.
   265      #
   266      # For more information and examples on the "spread" stanza, please
   267      # see the online documentation at:
   268      #
   269      #     https://www.nomadproject.io/docs/job-specification/spread
   270      #
   271      # spread {
   272      # attribute specifies the name of a node attribute or metadata
   273      # attribute = "${node.datacenter}"
   274  
   275  
   276      # targets can be used to define desired percentages of allocations
   277      # for each targeted attribute value.
   278      #
   279      #   target "us-east1" {
   280      #     percent = 60
   281      #   }
   282      #   target "us-west1" {
   283      #     percent = 40
   284      #   }
   285      #  }
   286  
   287      # The "task" stanza creates an individual unit of work, such as a Docker
   288      # container, web application, or batch processing.
   289      #
   290      # For more information and examples on the "task" stanza, please see
   291      # the online documentation at:
   292      #
   293      #     https://www.nomadproject.io/docs/job-specification/task
   294      #
   295      task "redis" {
   296        # The "driver" parameter specifies the task driver that should be used to
   297        # run the task.
   298        driver = "docker"
   299  
   300        # The "config" stanza specifies the driver configuration, which is passed
   301        # directly to the driver to start the task. The details of configurations
   302        # are specific to each driver, so please see specific driver
   303        # documentation for more information.
   304        config {
   305          image = "redis:3.2" # NOTE(review): Redis 3.2 is end-of-life upstream; consider a newer tag
   306  
   307          ports = ["db"] # expose the "db" port declared in the group network stanza
   308        }
   309  
   310        # The "artifact" stanza instructs Nomad to download an artifact from a
   311        # remote source prior to starting the task. This provides a convenient
   312        # mechanism for downloading configuration files or data needed to run the
   313        # task. It is possible to specify the "artifact" stanza multiple times to
   314        # download multiple artifacts.
   315        #
   316        # For more information and examples on the "artifact" stanza, please see
   317        # the online documentation at:
   318        #
   319        #     https://www.nomadproject.io/docs/job-specification/artifact
   320        #
   321        # artifact {
   322        #   source = "http://foo.com/artifact.tar.gz"
   323        #   options {
   324        #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
   325        #   }
   326        # }
   327  
   328  
   329        # The "logs" stanza instructs the Nomad client on how many log files and
   330        # the maximum size of those logs files to retain. Logging is enabled by
   331        # default, but the "logs" stanza allows for finer-grained control over
   332        # the log rotation and storage configuration.
   333        #
   334        # For more information and examples on the "logs" stanza, please see
   335        # the online documentation at:
   336        #
   337        #     https://www.nomadproject.io/docs/job-specification/logs
   338        #
   339        # logs {
   340        #   max_files     = 10
   341        #   max_file_size = 15
   342        # }
   343  
   344        # The "resources" stanza describes the requirements a task needs to
   345        # execute. Resource requirements include memory, cpu, and more.
   346        # This ensures the task will execute on a machine that contains enough
   347        # resource capacity.
   348        #
   349        # For more information and examples on the "resources" stanza, please see
   350        # the online documentation at:
   351        #
   352        #     https://www.nomadproject.io/docs/job-specification/resources
   353        #
   354        resources {
   355          # Capacity reserved for this task when placing it on a client.
   356          cpu    = 500 # 500 MHz
   357          memory = 256 # 256 MB
   358        }
   358  
   359  
   360        # The "template" stanza instructs Nomad to manage a template, such as
   361        # a configuration file or script. This template can optionally pull data
   362        # from Consul or Vault to populate runtime configuration data.
   363        #
   364        # For more information and examples on the "template" stanza, please see
   365        # the online documentation at:
   366        #
   367        #     https://www.nomadproject.io/docs/job-specification/template
   368        #
   369        # template {
   370        #   data          = "---\nkey: {{ key \"service/my-key\" }}"
   371        #   destination   = "local/file.yml"
   372        #   change_mode   = "signal"
   373        #   change_signal = "SIGHUP"
   374        # }
   375  
   376        # The "template" stanza can also be used to create environment variables
   377        # for tasks that prefer those to config files. The task will be restarted
   378        # when data pulled from Consul or Vault changes.
   379        #
   380        # template {
   381        #   data        = "KEY={{ key \"service/my-key\" }}"
   382        #   destination = "local/file.env"
   383        #   env         = true
   384        # }
   385  
   386        # The "vault" stanza instructs the Nomad client to acquire a token from
   387        # a HashiCorp Vault server. The Nomad servers must be configured and
   388        # authorized to communicate with Vault. By default, Nomad will inject
   389        # the token into the job via an environment variable and make the token
   390        # available to the "template" stanza. The Nomad client handles the renewal
   391        # and revocation of the Vault token.
   392        #
   393        # For more information and examples on the "vault" stanza, please see
   394        # the online documentation at:
   395        #
   396        #     https://www.nomadproject.io/docs/job-specification/vault
   397        #
   398        # vault {
   399        #   policies      = ["cdn", "frontend"]
   400        #   change_mode   = "signal"
   401        #   change_signal = "SIGHUP"
   402        # }
   403  
   404        # Controls the timeout between signalling a task it will be killed
   405        # and killing the task. If not set a default is used.
   406        # kill_timeout = "20s"
   407      }
   408    }
   409  }