github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/demo/csi/ceph-csi-plugin/ceph.nomad

# This job deploys Ceph as a Docker container in "demo mode"; it runs all its
# processes in a single task and will not persist data after a restart.

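# One way to run this demo (assuming a local Nomad agent and an HCL2-aware
# `nomad` CLI) is to submit the file directly; the variables below can be
# overridden at submit time, for example:
#
#   nomad job run -var="hostname=$(hostname)" ceph.nomad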
variable "cluster_id" {
  type = string
  # generated from uuid5(dns) with ceph.example.com as the seed
  default     = "e9ba69fa-67ff-5920-b374-84d5801edd19"
  description = "cluster ID for the Ceph monitor"
}

variable "hostname" {
  type        = string
  default     = "linux" # hostname of the Nomad repo's Vagrant box
  description = "hostname of the demo host"
}

job "ceph" {
  datacenters = ["dc1"]

  group "ceph" {

    network {
      # we can't configure networking in a way that will both satisfy the Ceph
      # monitor's requirement to know its own IP address *and* be routable
      # between containers, without either CNI or fixing
      # https://github.com/hashicorp/nomad/issues/9781
      #
      # So for now we'll use host networking to keep this demo understandable.
      # That also means the controller plugin will need to use host addresses.
      mode = "host"
    }

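    # 3300 is the Ceph monitor's msgr2 port, which matches the
    # "mon host = v2:...:3300/0" entry rendered into ceph.conf below.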
    service {
      name = "ceph-mon"
      port = 3300
    }

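    # The demo container serves the Ceph dashboard on this port; the check is
    # registered as "warning" and turns passing once the dashboard responds.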
    service {
      name = "ceph-dashboard"
      port = 5000

      check {
        type           = "http"
        interval       = "5s"
        timeout        = "1s"
        path           = "/"
        initial_status = "warning"
      }
    }

    task "ceph" {
      driver = "docker"

      config {
        image        = "ceph/daemon:latest-octopus"
        args         = ["demo"]
        network_mode = "host"
        privileged   = true

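        # Bind-mount the task's local/ceph directory (where the ceph.conf
        # template below is rendered) to /etc/ceph inside the container.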
        mount {
          type   = "bind"
          source = "local/ceph"
          target = "/etc/ceph"
        }
      }

      resources {
        memory = 512
        cpu    = 256
      }

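      # Rendered as an env file (env = true) so the entrypoint sees MON_IP and
      # the CEPH_DEMO_* settings. The sockaddr call evaluates a go-sockaddr
      # template that picks the first IPv4 address on the default interface.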
      template {

        data = <<EOT
MON_IP={{ sockaddr "with $ifAddrs := GetDefaultInterfaces | include \"type\" \"IPv4\" | limit 1 -}}{{- range $ifAddrs -}}{{ attr \"address\" . }}{{ end }}{{ end " }}
CEPH_PUBLIC_NETWORK=0.0.0.0/0
CEPH_DEMO_UID=demo
CEPH_DEMO_BUCKET=foobar
EOT

        destination = "${NOMAD_TASK_DIR}/env"
        env         = true
      }

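      # Renders ceph.conf into the directory bind-mounted above. Note the two
      # interpolation layers: ${var.*} is resolved by Nomad when the job is
      # parsed, while {{ sockaddr ... }} is resolved on the client at render
      # time.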
      template {
        data        = <<EOT
[global]
fsid = ${var.cluster_id}
mon initial members = ${var.hostname}
mon host = v2:{{ sockaddr "with $ifAddrs := GetDefaultInterfaces | include \"type\" \"IPv4\" | limit 1 -}}{{- range $ifAddrs -}}{{ attr \"address\" . }}{{ end }}{{ end " }}:3300/0

osd crush chooseleaf type = 0
osd journal size = 100
public network = 0.0.0.0/0
cluster network = 0.0.0.0/0
osd pool default size = 1
mon warn on pool no redundancy = false
osd_memory_target = 939524096
osd_memory_base = 251947008
osd_memory_cache_min = 351706112
osd objectstore = bluestore

[osd.0]
osd data = /var/lib/ceph/osd/ceph-0

[client.rgw.linux]
rgw dns name = ${var.hostname}
rgw enable usage log = true
rgw usage log tick interval = 1
rgw usage log flush threshold = 1
rgw usage max shards = 32
rgw usage max user shards = 1
log file = /var/log/ceph/client.rgw.linux.log
rgw frontends = beast endpoint=0.0.0.0:8080

EOT
        destination = "${NOMAD_TASK_DIR}/ceph/ceph.conf"
      }
    }
  }
}