github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/demo/csi/ceph-csi-plugin/plugin-cephrbd-controller-vagrant.nomad

variable "cluster_id" {
  type = string
  # generated from uuid5(dns) with ceph.example.com as the seed
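  # e.g. (a hypothetical regeneration command; any uuid5 implementation works):
  #   python3 -c 'import uuid; print(uuid.uuid5(uuid.NAMESPACE_DNS, "ceph.example.com"))'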
  default     = "e9ba69fa-67ff-5920-b374-84d5801edd19"
  description = "cluster ID for the Ceph monitor"
}

job "plugin-cephrbd-controller" {
  datacenters = ["dc1", "dc2"]

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  type = "service"

  group "cephrbd" {

    network {
      # we can't configure networking in a way that will both satisfy the Ceph
      # monitor's requirement to know its own IP address *and* be routable
      # between containers, without either CNI or fixing
      # https://github.com/hashicorp/nomad/issues/9781
      #
      # So for now we'll use host networking to keep this demo understandable.
      # That also means the controller plugin will need to use host addresses.
      mode = "host"

      port "prometheus" {}
    }

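    # register the plugin's metrics endpoint so that (with Consul as the
    # service provider) a Prometheus instance can discover it by the
    # "ceph-csi" tag and scrape the port allocated above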
    service {
      name = "prometheus"
      port = "prometheus"
      tags = ["ceph-csi"]
    }

    task "plugin" {
      driver = "docker"

      config {
        image = "quay.io/cephcsi/cephcsi:canary"

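        # NODE_ID, POD_ID, and CSI_ENDPOINT are interpolated from the
        # environment file rendered by the first template block below;
        # NOMAD_PORT_prometheus comes from the port allocated in the
        # network stanza above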
        args = [
          "--drivername=rbd.csi.ceph.com",
          "--v=5",
          "--type=rbd",
          "--controllerserver=true",
          "--nodeid=${NODE_ID}",
          "--instanceid=${POD_ID}",
          "--endpoint=${CSI_ENDPOINT}",
          "--metricsport=${NOMAD_PORT_prometheus}",
        ]

        network_mode = "host"
        ports        = ["prometheus"]

        # we need to be able to write key material to disk in this location
        mount {
          type     = "bind"
          source   = "secrets"
          target   = "/tmp/csi/keys"
          readonly = false
        }

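        # bind the monitor configuration rendered by the second template
        # block below into the path where the plugin expects to find it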
        mount {
          type     = "bind"
          source   = "ceph-csi-config/config.json"
          target   = "/etc/ceph-csi-config/config.json"
          readonly = false
        }

      }

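      # render an environment file for the task; env = true tells Nomad to
      # parse the rendered file as KEY=value pairs and inject them into the
      # task's environment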
      template {
        data = <<-EOT
POD_ID=${NOMAD_ALLOC_ID}
NODE_ID=${node.unique.id}
CSI_ENDPOINT=unix://csi/csi.sock
EOT

        destination = "${NOMAD_TASK_DIR}/env"
        env         = true
      }

      # ceph configuration file
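      # note: the sockaddr call below is deliberately unbalanced.
      # consul-template wraps the string argument in "{{ }}" before
      # evaluating it as a go-sockaddr template, so the inner template
      # leaves its opening and closing delimiters implicit; the result is
      # the default interface's IPv4 address, yielding a monitor address
      # of the form "<host-ip>:3300"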
      template {
        data = <<-EOT
[{
    "clusterID": "${var.cluster_id}",
    "monitors": [
        "{{ sockaddr "with $ifAddrs := GetDefaultInterfaces | include \"type\" \"IPv4\" | limit 1 -}}{{- range $ifAddrs -}}{{ attr \"address\" . }}{{ end }}{{ end " }}:3300"
    ]
}]
EOT

        destination = "ceph-csi-config/config.json"
      }

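      # register this task with Nomad as the controller half of the
      # "cephrbd" CSI plugin; Nomad mounts the directory for the plugin's
      # unix socket at mount_dir inside the container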
      csi_plugin {
        id        = "cephrbd"
        type      = "controller"
        mount_dir = "/csi"
      }

      # note: there's no upstream guidance on resource usage so
      # this is a best guess until we profile it in heavy use
      resources {
        cpu    = 256
        memory = 256
      }
    }
  }
}