github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/demo/csi/ceph-csi-plugin/plugin-cephrbd-node.nomad

job "plugin-cephrbd-node" {
  datacenters = ["dc1", "dc2"]

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

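  # Run as a system job so that one instance of the node plugin is
  # placed on every eligible client node.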
  type = "system"

  group "cephrbd" {

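    # Expose a dynamically assigned port for the plugin's Prometheus
    # metrics endpoint, and register it as a service so a scraper can
    # discover it by the "ceph-csi" tag.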
    network {
      port "prometheus" {}
    }

    service {
      name = "prometheus"
      port = "prometheus"
      tags = ["ceph-csi"]
    }

    task "plugin" {
      driver = "docker"

      config {
        image = "quay.io/cephcsi/cephcsi:canary"

        args = [
          "--drivername=rbd.csi.ceph.com",
          "--v=5",
          "--type=rbd",
          "--nodeserver=true",
          "--nodeid=${NODE_ID}",
          "--instanceid=${POD_ID}",
          "--endpoint=${CSI_ENDPOINT}",
          "--metricsport=${NOMAD_PORT_prometheus}",
        ]

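        # Node plugins mount and unmount volumes on the host, so the
        # container needs to run privileged.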
        privileged = true
        ports      = ["prometheus"]
      }

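      # Render the variables consumed by the args above: Nomad interpolates
      # ${NOMAD_ALLOC_ID} and the ${node.unique.id} node attribute when the
      # template is rendered, and env = true exports the rendered file as
      # environment variables for the task.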
      template {
        data = <<-EOT
POD_ID=${NOMAD_ALLOC_ID}
NODE_ID=${node.unique.id}
CSI_ENDPOINT=unix://csi/csi.sock
EOT

        destination = "${NOMAD_TASK_DIR}/env"
        env         = true
      }

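      # Register this task with Nomad as the node half of the "cephrbd"
      # CSI plugin. Nomad mounts the plugin directory into the task at
      # /csi, which is where CSI_ENDPOINT above points the gRPC socket.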
      csi_plugin {
        id        = "cephrbd"
        type      = "node"
        mount_dir = "/csi"
      }

      # note: there's no upstream guidance on resource usage, so this is
      # a best guess until we can profile the plugin under heavy load
      resources {
        cpu    = 256
        memory = 256
      }
    }
  }
}
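
# A minimal deployment sketch (assumes a reachable Nomad cluster and that
# the matching controller plugin job from this demo is also running):
#
#   nomad job run plugin-cephrbd-node.nomad
#   nomad plugin status cephrbd
#
# The plugin status output reports nodes as healthy once the node service
# has registered on each client.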