github.com/janma/nomad@v0.11.3/command/assets/connect.nomad

# There can only be a single job definition per file. This job is named
# "countdash" so it will create a job with the ID and Name "countdash".

# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name and one or many task groups,
# which are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
#     https://www.nomadproject.io/docs/job-specification/job.html
#
job "countdash" {
  # The "region" parameter specifies the region in which to execute the job.
  # If omitted, this inherits the default region name of "global".
  # region = "global"
  #
  # The "datacenters" parameter specifies the list of datacenters which should
  # be considered when placing this job. This must be provided.
  datacenters = ["dc1"]

  # The "type" parameter controls the type of job, which impacts the scheduler's
  # decision on placement. This configuration is optional and defaults to
  # "service". For a full list of job types and their differences, please see
  # the online documentation.
  #
  # For more information, please see the online documentation at:
  #
  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
  #
  type = "service"

  # The "constraint" stanza defines additional constraints for placing this job,
  # in addition to any resource or driver constraints. This stanza may be placed
  # at the "job", "group", or "task" level, and supports variable interpolation.
  #
  # For more information and examples on the "constraint" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/constraint.html
  #
  # constraint {
  #   attribute = "${attr.kernel.name}"
  #   value     = "linux"
  # }

  # The "update" stanza specifies the update strategy of task groups. The update
  # strategy is used to control things like rolling upgrades, canaries, and
  # blue/green deployments. If omitted, no update strategy is enforced. The
  # "update" stanza may be placed at the job or task group level. When placed at
  # the job level, it applies to all groups within the job. When placed at both
  # the job and group level, the stanzas are merged with the group's taking
  # precedence.
  #
  # For more information and examples on the "update" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/update.html
  #
  update {
    # The "max_parallel" parameter specifies the maximum number of updates to
    # perform in parallel. In this case, this specifies to update a single task
    # at a time.
    max_parallel = 1

    # The "min_healthy_time" parameter specifies the minimum time the allocation
    # must be in the healthy state before it is marked as healthy and unblocks
    # further allocations from being updated.
    min_healthy_time = "10s"

    # The "healthy_deadline" parameter specifies the deadline by which the
    # allocation must be marked as healthy, after which the allocation is
    # automatically transitioned to unhealthy. Transitioning to unhealthy will
    # fail the deployment and potentially roll back the job if "auto_revert" is
    # set to true.
    healthy_deadline = "3m"

    # The "progress_deadline" parameter specifies the deadline by which an
    # allocation must be marked as healthy. The deadline begins when the first
    # allocation for the deployment is created and is reset whenever an allocation
    # as part of the deployment transitions to a healthy state. If no allocation
    # transitions to the healthy state before the progress deadline, the
    # deployment is marked as failed.
    progress_deadline = "10m"

    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
    auto_revert = false

    # The "canary" parameter specifies that changes to the job that would result
    # in destructive updates should create the specified number of canaries
    # without stopping any previous allocations. Once the operator determines the
    # canaries are healthy, they can be promoted, which unblocks a rolling update
    # of the remaining allocations at a rate of "max_parallel".
    #
    # Further, setting "canary" equal to the count of the task group allows
    # blue/green deployments. When the job is updated, a full set of the new
    # version is deployed, and upon promotion the old version is stopped.
    canary = 0
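
    # For illustration only (values not in the original example): a group with
    # count = 3 could run a blue/green deployment by setting
    #
    #   canary = 3
    #
    # so that a full second set of allocations runs alongside the old version
    # until the operator promotes or fails the deployment.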
  }

  # The "migrate" stanza specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
  #
  # For more information on the "migrate" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/migrate.html
  #
  migrate {
    # Specifies the number of allocations that can be migrated at the same
    # time. This number must be less than the total count for the group, as
    # (count - max_parallel) allocations will be left running during migrations.
    max_parallel = 1
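
    # For example (illustrative numbers, not from the original file): with
    # count = 3 and max_parallel = 1, two allocations keep running while one
    # at a time is migrated off a draining node.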

    # Specifies the mechanism by which allocation health is determined. The
    # potential values are "checks" or "task_states".
    health_check = "checks"

    # Specifies the minimum time the allocation must be in the healthy state
    # before it is marked as healthy and unblocks further allocations from being
    # migrated. This is specified using a label suffix like "30s" or "15m".
    min_healthy_time = "10s"

    # Specifies the deadline by which the allocation must be marked as healthy,
    # after which the allocation is automatically transitioned to unhealthy. This
    # is specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
  }

  # The "group" stanza defines a series of tasks that should be co-located on
  # the same Nomad client. Any task within a group will be placed on the same
  # client.
  #
  # For more information and examples on the "group" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/group.html
  #
  group "api" {
    # The "count" parameter specifies the number of instances of this task
    # group that should be running. This value must be non-negative and
    # defaults to 1.
    count = 1

    # The "restart" stanza configures a group's behavior on task failure. If
    # left unspecified, a default restart policy is used based on the job type.
    #
    # For more information and examples on the "restart" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/restart.html
    #
    restart {
      # The number of attempts to run the task within the specified interval.
      attempts = 2
      interval = "30m"

      # The "delay" parameter specifies the duration to wait before restarting
      # a task after it has failed.
      delay = "15s"

      # The "mode" parameter controls what happens when a task has restarted
      # "attempts" times within the interval. "delay" mode delays the next
      # restart until the next interval. "fail" mode does not restart the task
      # if "attempts" has been hit within the interval.
      mode = "fail"
    }
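
    # Concretely (an illustrative reading of the values above): a task may
    # fail and restart twice within a 30 minute window; a third failure in
    # that window fails the allocation because mode = "fail".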

    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
    # instead of a hard disk requirement. Tasks using this stanza should not
    # specify disk requirements in their "resources" stanza. All tasks in this
    # group will share the same ephemeral disk.
    #
    # For more information and examples on the "ephemeral_disk" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
    #
    ephemeral_disk {
      # When sticky is true and the task group is updated, the scheduler
      # will prefer to place the updated allocation on the same node and
      # will migrate the data. This is useful for tasks that store data
      # that should persist across allocation updates.
      # sticky = true
      #
      # Setting migrate to true causes the allocation directory of a
      # sticky allocation to be migrated along with it.
      # migrate = true
      #
      # The "size" parameter specifies the size in MB of the ephemeral disk
      # shared between tasks in the group.
      size = 300
    }

    # The "affinity" stanza enables operators to express placement preferences
    # based on node attributes or metadata.
    #
    # For more information and examples on the "affinity" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/affinity.html
    #
    # affinity {
    #   # "attribute" specifies the name of a node attribute or metadata.
    #   attribute = "${node.datacenter}"
    #
    #   # "value" specifies the desired attribute value. In this example Nomad
    #   # will prefer placement in the "us-west1" datacenter.
    #   value = "us-west1"
    #
    #   # "weight" can be used to indicate relative preference when the job
    #   # has more than one affinity. It defaults to 50 if not set.
    #   weight = 100
    # }

    # The "spread" stanza allows operators to increase the failure tolerance of
    # their applications by specifying a node attribute that allocations
    # should be spread over.
    #
    # For more information and examples on the "spread" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/spread.html
    #
    # spread {
    #   # "attribute" specifies the name of a node attribute or metadata.
    #   attribute = "${node.datacenter}"
    #
    #   # "target" blocks can be used to define desired percentages of
    #   # allocations for each targeted attribute value.
    #   target "us-east1" {
    #     percent = 60
    #   }
    #   target "us-west1" {
    #     percent = 40
    #   }
    # }

    # The "network" stanza for a group creates a network namespace shared
    # by all tasks within the group.
    network {
      # The "mode" parameter selects the networking mode for the group.
      # "bridge" mode configures the shared network namespace using CNI
      # plugins; see the documentation for CNI plugins at:
      #
      #     https://github.com/containernetworking/plugins
      #
      mode = "bridge"

      # The service we define for this group is accessible only via
      # Consul Connect, so we do not define ports in its network.
      # port "http" {
      #   to = "8080"
      # }
    }

    # The "service" stanza enables Consul Connect.
    service {
      name = "count-api"

      # The port in the service stanza is the port the service listens on.
      # The Envoy proxy will automatically route traffic to that port
      # inside the network namespace. If the application binds to localhost
      # on this port, the task needs no additional network configuration.
      port = "9001"

      # The "check" stanza specifies a health check associated with the service.
      # This can be specified multiple times to define multiple checks for the
      # service. Note that checks run inside the task indicated by the "task"
      # field (here, the group's "web" task).
      #
      # check {
      #   name     = "alive"
      #   type     = "tcp"
      #   task     = "web"
      #   interval = "10s"
      #   timeout  = "2s"
      # }

      connect {
        # The "sidecar_service" stanza configures the Envoy sidecar admission
        # controller. For each task group with a sidecar_service, Nomad will
        # inject an Envoy task into the task group. A group network will be
        # required, and a dynamic port will be registered for remote services
        # to connect to Envoy with the name `connect-proxy-<service>`.
        #
        # By default, Envoy will be run via its official upstream Docker image.
        sidecar_service {}
      }
    }

    # The "task" stanza creates an individual unit of work, such as a Docker
    # container, web application, or batch processing.
    #
    # For more information and examples on the "task" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/task.html
    #
    task "web" {
      # The "driver" parameter specifies the task driver that should be used to
      # run the task.
      driver = "docker"

      # The "config" stanza specifies the driver configuration, which is passed
      # directly to the driver to start the task. The details of configurations
      # are specific to each driver, so please see specific driver
      # documentation for more information.
      config {
        image = "hashicorpnomad/counter-api:v1"
      }

      # The "artifact" stanza instructs Nomad to download an artifact from a
      # remote source prior to starting the task. This provides a convenient
      # mechanism for downloading configuration files or data needed to run the
      # task. It is possible to specify the "artifact" stanza multiple times to
      # download multiple artifacts.
      #
      # For more information and examples on the "artifact" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/artifact.html
      #
      # artifact {
      #   source = "http://foo.com/artifact.tar.gz"
      #   options {
      #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
      #   }
      # }

      # The "logs" stanza instructs the Nomad client on how many log files and
      # the maximum size of those log files to retain. Logging is enabled by
      # default, but the "logs" stanza allows for finer-grained control over
      # the log rotation and storage configuration.
      #
      # For more information and examples on the "logs" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/logs.html
      #
      # logs {
      #   max_files     = 10
      #   max_file_size = 15
      # }

      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, network, cpu, and more.
      # This ensures the task will execute on a machine that contains enough
      # resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/resources.html
      #
      resources {
        cpu    = 500 # 500 MHz
        memory = 256 # 256 MB
      }
    }

    # The Envoy sidecar admission controller will inject an Envoy task into
    # any task group for each service with a sidecar_service stanza it contains.
    # A group network will be required, and a dynamic port will be registered for
    # remote services to connect to Envoy with the name `connect-proxy-<service>`.
    # By default, Envoy will be run via its official upstream Docker image.
    #
    # There are two ways to modify the default behavior:
    #   * The `connect` stanza can include a `sidecar_task` stanza that merges
    #     into the default sidecar configuration.
    #   * Add the `kind = "connect-proxy:<service>"` field to another task.
    #     That task will replace the default Envoy proxy task entirely.
    #
    # task "connect-<service>" {
    #   kind   = "connect-proxy:<service>"
    #   driver = "docker"
    #
    #   config {
    #     image = "${meta.connect.sidecar_image}"
    #     args  = [
    #       "-c", "${NOMAD_TASK_DIR}/bootstrap.json",
    #       "-l", "${meta.connect.log_level}"
    #     ]
    #   }
    #
    #   resources {
    #     cpu    = 100
    #     memory = 300
    #   }
    #
    #   logs {
    #     max_files     = 2
    #     max_file_size = 2
    #   }
    # }
  }

  # This job has a second "group" stanza to define tasks that might be placed
  # on a separate Nomad client from the group above.
  #
  group "dashboard" {
    network {
      mode = "bridge"

      # The `static = 9002` parameter requests that the Nomad scheduler reserve
      # port 9002 on a host network interface. The `to = 9002` parameter
      # forwards that host port to port 9002 inside the network namespace.
      port "http" {
        static = 9002
        to     = 9002
      }
    }

    service {
      name = "count-dashboard"
      port = "9002"

      connect {
        sidecar_service {
          proxy {
            # The "upstreams" stanza defines the remote service to access
            # (count-api) and what port to expose that service on inside
            # the network namespace. This allows this task to reach the
            # upstream at localhost:8080.
            upstreams {
              destination_name = "count-api"
              local_bind_port  = 8080
            }
          }
        }

        # The `sidecar_task` stanza modifies the default configuration
        # of the Envoy proxy task.
        # sidecar_task {
        #   resources {
        #     cpu    = 1000
        #     memory = 512
        #   }
        # }
      }
    }

    task "dashboard" {
      driver = "docker"

      # The application can take advantage of automatically created
      # environment variables to find the address of its upstream
      # service.
      env {
        COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api}"
      }
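
      # For illustration (value derived from the upstream above, not part of
      # the original file): Nomad renders NOMAD_UPSTREAM_ADDR_count_api as
      # "127.0.0.1:8080", so the dashboard reaches count-api through its
      # local Envoy listener.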

      config {
        image = "hashicorpnomad/counter-dashboard:v1"
      }
    }
  }
}
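
# A minimal sketch of how this file would typically be run, assuming a local
# Nomad agent with Consul available for Connect networking:
#
#     nomad job run connect.nomad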