The proposed CSPC schema, as a Go struct, can be found in the cstorpool.md file in the current directory.

Following is an example CSPC in YAML form. The YAML form of CSPC is what users interact with, and hence it helps illustrate the features and capabilities of the CSPC API.

The following YAML is typically what one will see when `kubectl get cspc demo-pool-cluster -n openebs -o yaml` is executed and
`demo-pool-cluster` exists in the system.
Hence, it is worth noting that a few fields appearing in the CSPC are not meant to be entered or modified by
a user; they are system-generated or meant for use by the system.
Most system-generated fields are meant to provide a user with additional info, e.g. status.

```YAML
apiVersion: cstor.openebs.io/v1
kind: CStorPoolCluster
metadata:
  name: demo-pool-cluster
  namespace: openebs
spec:
  # The following fields, i.e. resources, auxResources, tolerations and priorityClassName,
  # apply to every pool for which these values are left unspecified in its poolConfig.
  # spec.pools contains a list, and an item of the list is nothing but the
  # specification of a cstor pool.
  # The poolConfig field is present in each spec.pools[i].
  resources:
    requests:
      memory: "64Mi"
      cpu: "250m"
    limits:
      memory: "128Mi"
      cpu: "500m"

  auxResources:
    requests:
      memory: "50Mi"
      cpu: "400m"
    limits:
      memory: "100Mi"
      cpu: "400m"

  tolerations:
  - key: data-plane-node
    operator: Equal
    value: "true"
    effect: NoSchedule
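  # The toleration above lets the cstor-pool-manager pods schedule onto
  # nodes tainted with data-plane-node=true:NoSchedule.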

  priorityClassName: high-priority
  # A total of 3 pools are specified, and hence three CStorPoolInstance CRs will be created.
  # Also, a total of 3 cstor-pool-manager pods are created to manage the CStorPoolInstance CRs.
  pools:
    # This is the node where the cstor-pool-manager pod will get scheduled for this config.
    - nodeSelector:
        kubernetes.io/hostname: worker-node-1

      dataRaidGroups:
      - cspiBlockDevices:
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f36
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-0/1
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f37
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-0/2

      writeCacheGroups:
      - cspiBlockDevices:
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f38
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-1/1

      poolConfig:
        # Possible values for dataRaidGroupType are: {stripe, mirror, raidz1, raidz2}
        dataRaidGroupType: mirror
        # Possible values for writeCacheRaidGroupType are the same as for dataRaidGroupType
        writeCacheRaidGroupType: stripe
        thickProvisioning: false
        # Possible values for compression are: {lz, off}
        compression: lz
        # The following resources are null or empty, and therefore the
        # spec.resources, spec.auxResources and spec.tolerations
        # values will be used for the cstor-pool-manager pod.
        resources: null
        auxResources: null
        tolerations: null
        priorityClassName: ""

        # roThresholdLimit is the threshold limit (as a percentage) for
        # pool read-only mode. Once roThresholdLimit(%) of the pool
        # storage is used, the pool is set to read-only.
        # NOTE:
        # 1. If roThresholdLimit is set to 100 then the entire pool
        #    storage can be used; if unspecified, it defaults to 85%.
        # 2. roThresholdLimit must satisfy 0 < roThresholdLimit <= 100.
        roThresholdLimit: 70
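        # For example, on a pool with 100Gi of storage, roThresholdLimit: 70
        # puts the pool into read-only mode once roughly 70Gi is in use.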

    # This is the node where the cstor-pool-manager pod will get scheduled for this config.
    - nodeSelector:
        kubernetes.io/hostname: worker-node-2

      dataRaidGroups:
      - cspiBlockDevices:
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f39
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-0/3
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f40
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-0/4

      writeCacheGroups:
      - cspiBlockDevices:
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f41
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-1/2

      poolConfig:
        dataRaidGroupType: mirror
        writeCacheRaidGroupType: stripe
        thickProvisioning: false
        compression: lz
        # The following resources are null or empty, and therefore the
        # spec.resources, spec.auxResources and spec.tolerations
        # values will be used for the cstor-pool-manager pod.
        resources: null
        auxResources: null
        tolerations: null
        priorityClassName: ""

    - nodeSelector:
        kubernetes.io/hostname: worker-node-3

      dataRaidGroups:
      - cspiBlockDevices:
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f42
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-0/5
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f43
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-0/6

      writeCacheGroups:
      - cspiBlockDevices:
          - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f44
            # System-Generated/Used
            capacity:
            # System-Generated/Used
            devLink: /dev/iscsi-1/3

      poolConfig:
        dataRaidGroupType: stripe
        writeCacheRaidGroupType: stripe
        thickProvisioning: false
        compression: lz
        # The following resources are NOT null or empty, and therefore the
        # spec.resources, spec.auxResources and spec.tolerations
        # values will be IGNORED, and these values will be applied to the
        # cstor-pool-manager pod.
        resources:
          requests:
            memory: 70Mi
            cpu: 300m
          limits:
            memory: 130Mi
            cpu: 600m

        auxResources:
          requests:
            memory: 60Mi
            cpu: 500m
          limits:
            memory: 120Mi
            cpu: 500m

        tolerations:
        - key: data-plane-node
          operator: Equal
          value: "true"
          effect: NoSchedule

        - key: apac-zone
          operator: Equal
          value: "true"
          effect: NoSchedule

        priorityClassName: ultra-priority

# System-Generated/Used
status:
  provisionedInstances: 3
  desiredInstances: 3
  healthyInstances: 3
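  # The counts above correspond to the three pool specs declared under
  # spec.pools: three CStorPoolInstances are desired, and all three are
  # provisioned and healthy.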
  conditions:
  - type: PoolAvailability
    status: "False"
    # lastUpdateTime is the last time when the CSPC was reconciled.
    lastUpdateTime: 2020-03-13T03:56:01Z
    # lastTransitionTime is the last time when the CSPC spec transitioned to a different spec.
    lastTransitionTime: 2020-03-13T03:56:49Z
    reason: CStor pool manager demo-pool-cluster-xsdfr pod is pending
    message: CStor pool manager pod demo-pool-cluster-xsdfr is not reachable
versionDetails:
  autoUpgrade: false
  desired: "1.8"
  status:
    dependentsUpgraded: true
    current: "1.8"
    state: Reconciled
    message: ""
    reason: ""
    lastUpdateTime: 2020-03-13T03:56:49Z

```
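
For contrast with the fully populated object above, the following is a minimal sketch of what a user might actually author, leaving out every system-generated field and relying on the defaulting behaviour described earlier (e.g. roThresholdLimit falling back to 85%). The cluster name, node name and block device name here are placeholders.

```YAML
apiVersion: cstor.openebs.io/v1
kind: CStorPoolCluster
metadata:
  # Placeholder name; any valid object name works.
  name: minimal-pool-cluster
  namespace: openebs
spec:
  pools:
    - nodeSelector:
        # Placeholder node; the cstor-pool-manager pod is scheduled here.
        kubernetes.io/hostname: worker-node-1
      dataRaidGroups:
      - cspiBlockDevices:
          # Placeholder block device attached to the node above.
          - blockDeviceName: blockdevice-0123456789abcdef0123456789abcdef
      poolConfig:
        # stripe needs no device pairing, so a single block device suffices.
        dataRaidGroupType: stripe
```

Applying this should result in one CStorPoolInstance CR and one cstor-pool-manager pod, with resources, auxResources, tolerations and priorityClassName all left to the cluster-level defaults (here, unset) and roThresholdLimit at its default.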