github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/demo/csi/ceph-csi-plugin/plugin-cephrbd-controller.nomad

variable "cluster_id" {
  type = string
  # generated from uuid5(dns) with ceph.example.com as the seed
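  # it should be reproducible with, e.g., util-linux uuidgen:
  #   uuidgen --sha1 --namespace @dns --name ceph.example.com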
  default     = "e9ba69fa-67ff-5920-b374-84d5801edd19"
  description = "cluster ID for the Ceph monitor"
}

job "plugin-cephrbd-controller" {
  datacenters = ["dc1", "dc2"]

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  type = "service"

  group "cephrbd" {

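    # expose a dynamic port for the plugin's Prometheus metrics endpoint and
    # register it in Consul so it can be discovered via the "ceph-csi" tag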
    network {
      port "prometheus" {}
    }

    service {
      name = "prometheus"
      port = "prometheus"
      tags = ["ceph-csi"]
    }

    task "plugin" {
      driver = "docker"

      config {
        image = "quay.io/cephcsi/cephcsi:canary"

        args = [
          "--drivername=rbd.csi.ceph.com",
          "--v=5",
          "--type=rbd",
          "--controllerserver=true",
          "--nodeid=${NODE_ID}",
          "--instanceid=${POD_ID}",
          "--endpoint=${CSI_ENDPOINT}",
          "--metricsport=${NOMAD_PORT_prometheus}",
        ]

        ports = ["prometheus"]

        # we need to be able to write key material to disk in this location
        mount {
          type     = "bind"
          source   = "secrets"
          target   = "/tmp/csi/keys"
          readonly = false
        }

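        # bind-mount the rendered monitor config (see the template below) into
        # the path ceph-csi reads its cluster configuration from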
        mount {
          type     = "bind"
          source   = "ceph-csi-config/config.json"
          target   = "/etc/ceph-csi-config/config.json"
          readonly = false
        }

      }

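      # env = true loads the rendered KEY=value pairs into the task environment,
      # supplying the NODE_ID, POD_ID, and CSI_ENDPOINT values interpolated into
      # the args above; ${node.unique.id} is filled in by Nomad at render time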
      template {
        data = <<-EOT
POD_ID=${NOMAD_ALLOC_ID}
NODE_ID=${node.unique.id}
CSI_ENDPOINT=unix://csi/csi.sock
EOT

        destination = "${NOMAD_TASK_DIR}/env"
        env         = true
      }

      # ceph configuration file: the monitors list is rendered from the
      # addresses of all "ceph-mon" services registered in Consul
      template {

        data = <<EOF
[{
    "clusterID": "${var.cluster_id}",
    "monitors": [
        {{range $index, $service := service "ceph-mon"}}{{if gt $index 0}}, {{end}}"{{.Address}}"{{end}}
    ]
}]
EOF

        destination = "ceph-csi-config/config.json"
      }

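      # register this task with Nomad as the controller half of the "cephrbd"
      # CSI plugin; Nomad exposes a plugin directory at mount_dir, where the
      # plugin is expected to create the csi.sock socket named in CSI_ENDPOINT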
      csi_plugin {
        id        = "cephrbd"
        type      = "controller"
        mount_dir = "/csi"
      }

      # note: there's no upstream guidance on resource usage so
      # this is a best guess until we profile it in heavy use
      resources {
        cpu    = 256
        memory = 256
      }
    }
  }
}