github.com/mattyr/nomad@v0.3.3-0.20160919021406-3485a065154a/command/init.go (about)

     1  package command
     2  
     3  import (
     4  	"fmt"
     5  	"io/ioutil"
     6  	"os"
     7  	"strings"
     8  )
     9  
const (
	// DefaultInitName is the default name we use when
	// initializing the example file. Run writes the example
	// job to this path in the current working directory.
	DefaultInitName = "example.nomad"
)
    15  
// InitCommand generates a new job template that you can customize to your
// liking, like vagrant init. It embeds Meta, which supplies the Ui used
// for all output in Run.
type InitCommand struct {
	Meta
}
    21  
    22  func (c *InitCommand) Help() string {
    23  	helpText := `
    24  Usage: nomad init
    25  
    26    Creates an example job file that can be used as a starting
    27    point to customize further.
    28  `
    29  	return strings.TrimSpace(helpText)
    30  }
    31  
    32  func (c *InitCommand) Synopsis() string {
    33  	return "Create an example job file"
    34  }
    35  
    36  func (c *InitCommand) Run(args []string) int {
    37  	// Check for misuse
    38  	if len(args) != 0 {
    39  		c.Ui.Error(c.Help())
    40  		return 1
    41  	}
    42  
    43  	// Check if the file already exists
    44  	_, err := os.Stat(DefaultInitName)
    45  	if err != nil && !os.IsNotExist(err) {
    46  		c.Ui.Error(fmt.Sprintf("Failed to stat '%s': %v", DefaultInitName, err))
    47  		return 1
    48  	}
    49  	if !os.IsNotExist(err) {
    50  		c.Ui.Error(fmt.Sprintf("Job '%s' already exists", DefaultInitName))
    51  		return 1
    52  	}
    53  
    54  	// Write out the example
    55  	err = ioutil.WriteFile(DefaultInitName, []byte(defaultJob), 0660)
    56  	if err != nil {
    57  		c.Ui.Error(fmt.Sprintf("Failed to write '%s': %v", DefaultInitName, err))
    58  		return 1
    59  	}
    60  
    61  	// Success
    62  	c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
    63  	return 0
    64  }
    65  
// defaultJob is the example job specification that Run writes to
// DefaultInitName. The literal is trimmed so the emitted file begins
// directly with the leading comment block.
var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file.
# Create a job with ID and Name 'example'
job "example" {
	# Run the job in the global region, which is the default.
	# region = "global"

	# Specify the datacenters within the region this job can run in.
	datacenters = ["dc1"]

	# Service type jobs optimize for long-lived services. This is
	# the default but we can change to batch for short-lived tasks.
	# type = "service"

	# Priority controls our access to resources and scheduling priority.
	# This can be 1 to 100, inclusively, and defaults to 50.
	# priority = 50

	# Restrict our job to only linux. We can specify multiple
	# constraints as needed.
	constraint {
		attribute = "${attr.kernel.name}"
		value = "linux"
	}

	# Configure the job to do rolling updates
	update {
		# Stagger updates every 10 seconds
		stagger = "10s"

		# Update a single task at a time
		max_parallel = 1
	}

	# Create a 'cache' group. Each task in the group will be
	# scheduled onto the same machine.
	group "cache" {
		# Control the number of instances of this group.
		# Defaults to 1
		# count = 1

		# Configure the restart policy for the task group. If not provided, a
		# default is used based on the job type.
		restart {
			# The number of attempts to run the job within the specified interval.
			attempts = 10
			interval = "5m"
			
			# A delay between a task failing and a restart occurring.
			delay = "25s"

			# Mode controls what happens when a task has restarted "attempts"
			# times within the interval. "delay" mode delays the next restart
			# till the next interval. "fail" mode does not restart the task if
			# "attempts" has been hit within the interval.
			mode = "delay"
		}

		ephemeral_disk {
			# When sticky is true and the task group is updated, the scheduler
			# will prefer to place the updated allocation on the same node and
			# will migrate the data. This is useful for tasks that store data
			# that should persist across allocation updates.
			# sticky = true

			# Size of the shared ephemeral disk between tasks in the task group.
			size = 300
		}

		# Define a task to run
		task "redis" {
			# Use Docker to run the task.
			driver = "docker"

			# Configure Docker driver with the image
			config {
				image = "redis:latest"
				port_map {
					db = 6379
				}
			}

			service {
				name = "${TASKGROUP}-redis"
				tags = ["global", "cache"]
				port = "db"
				check {
					name = "alive"
					type = "tcp"
					interval = "10s"
					timeout = "2s"
				}
			}

			# We must specify the resources required for
			# this task to ensure it runs on a machine with
			# enough capacity.
			resources {
				cpu = 500 # 500 MHz
				memory = 256 # 256MB
				network {
					mbits = 10
					port "db" {
					}
				}
			}

			# The artifact block can be specified one or more times to download
			# artifacts prior to the task being started. This is convenient for
			# shipping configs or data needed by the task.
			# artifact {
			#	  source = "http://foo.com/artifact.tar.gz"
			#	  options {
			#	      checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
			#     }
			# }
			
			# Specify configuration related to log rotation
			# logs {
			#     max_files = 10
			#     max_file_size = 15
			# }
			 
			# Controls the timeout between signalling a task it will be killed
			# and killing the task. If not set a default is used.
			# kill_timeout = "20s"
		}
	}
}
`)