github.com/alibaba/sealer@v0.8.6-0.20220430115802-37a2bdaa8173/applications/mysql-operator/cr.yaml (about)

     1  apiVersion: pxc.percona.com/v1-11-0
     2  kind: PerconaXtraDBCluster
     3  metadata:
     4    name: cluster1
     5    finalizers:
     6      - delete-pxc-pods-in-order
     7  #    - delete-proxysql-pvc
     8  #    - delete-pxc-pvc
     9  #  annotations:
    10  #    percona.com/issue-vault-token: "true"
    11  spec:
    12    crVersion: 1.11.0
    13  #  secretsName: my-cluster-secrets
    14  #  vaultSecretName: keyring-secret-vault
    15  #  sslSecretName: my-cluster-ssl
    16  #  sslInternalSecretName: my-cluster-ssl-internal
    17  #  logCollectorSecretName: my-log-collector-secrets
    18  #  initImage: percona/percona-xtradb-cluster-operator:1.11.0
    19  #  enableCRValidationWebhook: true
    20  #  tls:
    21  #    SANs:
    22  #      - pxc-1.example.com
    23  #      - pxc-2.example.com
    24  #      - pxc-3.example.com
    25  #    issuerConf:
    26  #      name: special-selfsigned-issuer
    27  #      kind: ClusterIssuer
    28  #      group: cert-manager.io
    29    allowUnsafeConfigurations: false
    30  #  pause: false
    31    updateStrategy: SmartUpdate
    32    upgradeOptions:
    33      versionServiceEndpoint: https://check.percona.com
    34      apply: 8.0-recommended
    35      schedule: "0 4 * * *"
    36    pxc:
    37      size: 3
    38      image: percona/percona-xtradb-cluster:8.0.23-14.1
    39      autoRecovery: true
    40  #    expose:
    41  #      enabled: true
    42  #      type: LoadBalancer
    43  #      trafficPolicy: Local
    44  #      loadBalancerSourceRanges:
    45  #        - 10.0.0.0/8
    46  #      annotations:
    47  #        networking.gke.io/load-balancer-type: "Internal"
    48  #    replicationChannels:
    49  #    - name: pxc1_to_pxc2
    50  #      isSource: true
    51  #    - name: pxc2_to_pxc1
    52  #      isSource: false
    53  #      configuration:
    54  #        sourceRetryCount: 3
    55  #        sourceConnectRetry: 60
    56  #      sourcesList:
    57  #      - host: 10.95.251.101
    58  #        port: 3306
    59  #        weight: 100
    60  #    schedulerName: mycustom-scheduler
    61  #    readinessDelaySec: 15
    62  #    livenessDelaySec: 600
    63  #    configuration: |
    64  #      [mysqld]
    65  #      wsrep_debug=CLIENT
    66  #      wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
    67  #      [sst]
    68  #      xbstream-opts=--decompress
    69  #      [xtrabackup]
    70  #      compress=lz4
    71  #      for PXC 5.7
    72  #      [xtrabackup]
    73  #      compress
    74  #    imagePullSecrets:
    75  #      - name: private-registry-credentials
    76  #    priorityClassName: high-priority
    77  #    annotations:
    78  #      iam.amazonaws.com/role: role-arn
    79  #    labels:
    80  #      rack: rack-22
    81  #    readinessProbes:
    82  #      initialDelaySeconds: 15
    83  #      timeoutSeconds: 15
    84  #      periodSeconds: 30
    85  #      successThreshold: 1
    86  #      failureThreshold: 5
    87  #    livenessProbes:
    88  #      initialDelaySeconds: 300
    89  #      timeoutSeconds: 5
    90  #      periodSeconds: 10
    91  #      successThreshold: 1
    92  #      failureThreshold: 3
    93  #    containerSecurityContext:
    94  #      privileged: false
    95  #    podSecurityContext:
    96  #      runAsUser: 1001
    97  #      runAsGroup: 1001
    98  #      supplementalGroups: [1001]
    99  #    serviceAccountName: percona-xtradb-cluster-operator-workload
   100  #    imagePullPolicy: Always
   101  #    runtimeClassName: image-rc
   102  #    sidecars:
   103  #    - image: busybox
   104  #      command: ["/bin/sh"]
   105  #      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
   106  #      name: my-sidecar-1
   107  #      resources:
   108  #        requests:
   109  #          memory: 100M
   110  #          cpu: 100m
   111  #        limits:
   112  #          memory: 200M
   113  #          cpu: 200m
   114  #    envVarsSecret: my-env-var-secrets
   115      resources:
   116        requests:
   117          memory: 1G
   118          cpu: 600m
   119  #        ephemeral-storage: 1G
   120  #      limits:
   121  #        memory: 1G
   122  #        cpu: "1"
   123  #        ephemeral-storage: 1G
   124  #    nodeSelector:
   125  #      disktype: ssd
   126      affinity:
   127        antiAffinityTopologyKey: "kubernetes.io/hostname"
   128  #      advanced:
   129  #        nodeAffinity:
   130  #          requiredDuringSchedulingIgnoredDuringExecution:
   131  #            nodeSelectorTerms:
   132  #            - matchExpressions:
   133  #              - key: kubernetes.io/e2e-az-name
   134  #                operator: In
   135  #                values:
   136  #                - e2e-az1
   137  #                - e2e-az2
   138  #    tolerations:
   139  #    - key: "node.alpha.kubernetes.io/unreachable"
   140  #      operator: "Exists"
   141  #      effect: "NoExecute"
   142  #      tolerationSeconds: 6000
   143      podDisruptionBudget:
   144        maxUnavailable: 1
   145  #      minAvailable: 0
   146      volumeSpec:
   147  #      emptyDir: {}
   148  #      hostPath:
   149  #        path: /data
   150  #        type: Directory
   151        persistentVolumeClaim:
   152  #        storageClassName: standard
   153  #        accessModes: [ "ReadWriteOnce" ]
   154          resources:
   155            requests:
   156              storage: 6G
   157      gracePeriod: 600
   158    haproxy:
   159      enabled: true
   160      size: 3
   161      image: perconalab/percona-xtradb-cluster-operator:main-haproxy
   162  #    replicasServiceEnabled: false
   163  #    imagePullPolicy: Always
   164  #    schedulerName: mycustom-scheduler
   165  #    configuration: |
   166  #
   167  #    the actual default configuration file can be found here https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
   168  #
   169  #      global
   170  #        maxconn 2048
   171  #        external-check
   172  #        insecure-fork-wanted
   173  #        stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
   174  #
   175  #      defaults
   176  #        default-server init-addr last,libc,none
   177  #        log global
   178  #        mode tcp
   179  #        retries 10
   180  #        timeout client 28800s
   181  #        timeout connect 100500
   182  #        timeout server 28800s
   183  #
   184  #      frontend galera-in
   185  #        bind *:3309 accept-proxy
   186  #        bind *:3306
   187  #        mode tcp
   188  #        option clitcpka
   189  #        default_backend galera-nodes
   190  #
   191  #      frontend galera-admin-in
   192  #        bind *:33062
   193  #        mode tcp
   194  #        option clitcpka
   195  #        default_backend galera-admin-nodes
   196  #
   197  #      frontend galera-replica-in
   198  #        bind *:3307
   199  #        mode tcp
   200  #        option clitcpka
   201  #        default_backend galera-replica-nodes
   202  #
   203  #      frontend galera-mysqlx-in
   204  #        bind *:33060
   205  #        mode tcp
   206  #        option clitcpka
   207  #        default_backend galera-mysqlx-nodes
   208  #
   209  #      frontend stats
   210  #        bind *:8404
   211  #        mode http
   212  #        option http-use-htx
   213  #        http-request use-service prometheus-exporter if { path /metrics }
   214  #    imagePullSecrets:
   215  #      - name: private-registry-credentials
   216  #    annotations:
   217  #      iam.amazonaws.com/role: role-arn
   218  #    labels:
   219  #      rack: rack-22
   220  #    readinessProbes:
   221  #      initialDelaySeconds: 15
   222  #      timeoutSeconds: 1
   223  #      periodSeconds: 5
   224  #      successThreshold: 1
   225  #      failureThreshold: 3
   226  #    livenessProbes:
   227  #      initialDelaySeconds: 60
   228  #      timeoutSeconds: 5
   229  #      periodSeconds: 30
   230  #      successThreshold: 1
   231  #      failureThreshold: 4
   232  #    serviceType: ClusterIP
   233  #    externalTrafficPolicy: Cluster
   234  #    replicasServiceType: ClusterIP
   235  #    replicasExternalTrafficPolicy: Cluster
   236  #    runtimeClassName: image-rc
   237  #    sidecars:
   238  #    - image: busybox
   239  #      command: ["/bin/sh"]
   240  #      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
   241  #      name: my-sidecar-1
   242  #      resources:
   243  #        requests:
   244  #          memory: 100M
   245  #          cpu: 100m
   246  #        limits:
   247  #          memory: 200M
   248  #          cpu: 200m
   249  #    envVarsSecret: my-env-var-secrets
   250      resources:
   251        requests:
   252          memory: 1G
   253          cpu: 600m
   254  #      limits:
   255  #        memory: 1G
   256  #        cpu: 700m
   257  #    priorityClassName: high-priority
   258  #    nodeSelector:
   259  #      disktype: ssd
   260  #    sidecarResources:
   261  #      requests:
   262  #        memory: 1G
   263  #        cpu: 500m
   264  #      limits:
   265  #        memory: 2G
   266  #        cpu: 600m
   267  #    serviceAccountName: percona-xtradb-cluster-operator-workload
   268      affinity:
   269        antiAffinityTopologyKey: "kubernetes.io/hostname"
   270  #      advanced:
   271  #        nodeAffinity:
   272  #          requiredDuringSchedulingIgnoredDuringExecution:
   273  #            nodeSelectorTerms:
   274  #            - matchExpressions:
   275  #              - key: kubernetes.io/e2e-az-name
   276  #                operator: In
   277  #                values:
   278  #                - e2e-az1
   279  #                - e2e-az2
   280  #    tolerations:
   281  #    - key: "node.alpha.kubernetes.io/unreachable"
   282  #      operator: "Exists"
   283  #      effect: "NoExecute"
   284  #      tolerationSeconds: 6000
   285      podDisruptionBudget:
   286        maxUnavailable: 1
   287  #      minAvailable: 0
   288      gracePeriod: 30
   289  #    loadBalancerSourceRanges:
   290  #      - 10.0.0.0/8
   291  #    serviceAnnotations:
   292  #      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
   293  #    serviceLabels:
   294  #      rack: rack-23
   295    proxysql:
   296      enabled: false
   297      size: 3
   298      image: perconalab/percona-xtradb-cluster-operator:main-proxysql
   299  #    imagePullPolicy: Always
   300  #    configuration: |
   301  #      datadir="/var/lib/proxysql"
   302  #
   303  #      admin_variables =
   304  #      {
   305  #        admin_credentials="proxyadmin:admin_password"
   306  #        mysql_ifaces="0.0.0.0:6032"
   307  #        refresh_interval=2000
   308  #
   309  #        cluster_username="proxyadmin"
   310  #        cluster_password="admin_password"
   311  #        checksum_admin_variables=false
   312  #        checksum_ldap_variables=false
   313  #        checksum_mysql_variables=false
   314  #        cluster_check_interval_ms=200
   315  #        cluster_check_status_frequency=100
   316  #        cluster_mysql_query_rules_save_to_disk=true
   317  #        cluster_mysql_servers_save_to_disk=true
   318  #        cluster_mysql_users_save_to_disk=true
   319  #        cluster_proxysql_servers_save_to_disk=true
   320  #        cluster_mysql_query_rules_diffs_before_sync=1
   321  #        cluster_mysql_servers_diffs_before_sync=1
   322  #        cluster_mysql_users_diffs_before_sync=1
   323  #        cluster_proxysql_servers_diffs_before_sync=1
   324  #      }
   325  #
   326  #      mysql_variables=
   327  #      {
   328  #        monitor_password="monitor"
   329  #        monitor_galera_healthcheck_interval=1000
   330  #        threads=2
   331  #        max_connections=2048
   332  #        default_query_delay=0
   333  #        default_query_timeout=10000
   334  #        poll_timeout=2000
   335  #        interfaces="0.0.0.0:3306"
   336  #        default_schema="information_schema"
   337  #        stacksize=1048576
   338  #        connect_timeout_server=10000
   339  #        monitor_history=60000
   340  #        monitor_connect_interval=20000
   341  #        monitor_ping_interval=10000
   342  #        ping_timeout_server=200
   343  #        commands_stats=true
   344  #        sessions_sort=true
   345  #        have_ssl=true
   346  #        ssl_p2s_ca="/etc/proxysql/ssl-internal/ca.crt"
   347  #        ssl_p2s_cert="/etc/proxysql/ssl-internal/tls.crt"
   348  #        ssl_p2s_key="/etc/proxysql/ssl-internal/tls.key"
   349  #        ssl_p2s_cipher="ECDHE-RSA-AES128-GCM-SHA256"
   350  #      }
   351  #    schedulerName: mycustom-scheduler
   352  #    imagePullSecrets:
   353  #      - name: private-registry-credentials
   354  #    annotations:
   355  #      iam.amazonaws.com/role: role-arn
   356  #    labels:
   357  #      rack: rack-22
   358  #    serviceType: ClusterIP
   359  #    externalTrafficPolicy: Cluster
   360  #    runtimeClassName: image-rc
   361  #    sidecars:
   362  #    - image: busybox
   363  #      command: ["/bin/sh"]
   364  #      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
   365  #      name: my-sidecar-1
   366  #      resources:
   367  #        requests:
   368  #          memory: 100M
   369  #          cpu: 100m
   370  #        limits:
   371  #          memory: 200M
   372  #          cpu: 200m
   373  #    envVarsSecret: my-env-var-secrets
   374      resources:
   375        requests:
   376          memory: 1G
   377          cpu: 600m
   378  #      limits:
   379  #        memory: 1G
   380  #        cpu: 700m
   381  #    priorityClassName: high-priority
   382  #    nodeSelector:
   383  #      disktype: ssd
   384  #    sidecarResources:
   385  #      requests:
   386  #        memory: 1G
   387  #        cpu: 500m
   388  #      limits:
   389  #        memory: 2G
   390  #        cpu: 600m
   391  #    serviceAccountName: percona-xtradb-cluster-operator-workload
   392      affinity:
   393        antiAffinityTopologyKey: "kubernetes.io/hostname"
   394  #      advanced:
   395  #        nodeAffinity:
   396  #          requiredDuringSchedulingIgnoredDuringExecution:
   397  #            nodeSelectorTerms:
   398  #            - matchExpressions:
   399  #              - key: kubernetes.io/e2e-az-name
   400  #                operator: In
   401  #                values:
   402  #                - e2e-az1
   403  #                - e2e-az2
   404  #    tolerations:
   405  #    - key: "node.alpha.kubernetes.io/unreachable"
   406  #      operator: "Exists"
   407  #      effect: "NoExecute"
   408  #      tolerationSeconds: 6000
   409      volumeSpec:
   410  #      emptyDir: {}
   411  #      hostPath:
   412  #        path: /data
   413  #        type: Directory
   414        persistentVolumeClaim:
   415  #        storageClassName: standard
   416  #        accessModes: [ "ReadWriteOnce" ]
   417          resources:
   418            requests:
   419              storage: 2G
   420      podDisruptionBudget:
   421        maxUnavailable: 1
   422  #      minAvailable: 0
   423      gracePeriod: 30
   424  #    loadBalancerSourceRanges:
   425  #      - 10.0.0.0/8
   426  #    serviceAnnotations:
   427  #      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
   428  #    serviceLabels:
   429  #      rack: rack-23
   430    logcollector:
   431      enabled: true
   432      image: perconalab/percona-xtradb-cluster-operator:main-logcollector
   433  #    configuration: |
   434  #      [OUTPUT]
   435  #           Name  es
   436  #           Match *
   437  #           Host  192.168.2.3
   438  #           Port  9200
   439  #           Index my_index
   440  #           Type  my_type
   441      resources:
   442        requests:
   443          memory: 100M
   444          cpu: 200m
   445    pmm:
   446      enabled: false
   447      image: percona/pmm-client:2.23.0
   448      serverHost: monitoring-service
   449      serverUser: admin
   450  #    pxcParams: "--disable-tablestats-limit=2000"
   451  #    proxysqlParams: "--custom-labels=CUSTOM-LABELS"
   452      resources:
   453        requests:
   454          memory: 150M
   455          cpu: 300m
   456    backup:
   457      image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup
   458  #    serviceAccountName: percona-xtradb-cluster-operator
   459  #    imagePullSecrets:
   460  #      - name: private-registry-credentials
   461      pitr:
   462        enabled: false
   463        storageName: STORAGE-NAME-HERE
   464        timeBetweenUploads: 60
   465  #      resources:
   466  #        requests:
   467  #          memory: 0.1G
   468  #          cpu: 100m
   469  #        limits:
   470  #          memory: 1G
   471  #          cpu: 700m
   472      storages:
   473        s3-us-west:
   474          type: s3
   475          verifyTLS: true
   476  #        nodeSelector:
   477  #          storage: tape
   478  #          backupWorker: 'True'
   479  #        resources:
   480  #          requests:
   481  #            memory: 1G
   482  #            cpu: 600m
   483  #        affinity:
   484  #          nodeAffinity:
   485  #            requiredDuringSchedulingIgnoredDuringExecution:
   486  #              nodeSelectorTerms:
   487  #              - matchExpressions:
   488  #                - key: backupWorker
   489  #                  operator: In
   490  #                  values:
   491  #                  - 'True'
   492  #        tolerations:
   493  #          - key: "backupWorker"
   494  #            operator: "Equal"
   495  #            value: "True"
   496  #            effect: "NoSchedule"
   497  #        annotations:
   498  #          testName: scheduled-backup
   499  #        labels:
   500  #          backupWorker: 'True'
   501  #        schedulerName: 'default-scheduler'
   502  #        priorityClassName: 'high-priority'
   503  #        containerSecurityContext:
   504  #          privileged: true
   505  #        podSecurityContext:
   506  #          fsGroup: 1001
   507  #          supplementalGroups: [1001, 1002, 1003]
   508          s3:
   509            bucket: S3-BACKUP-BUCKET-NAME-HERE
   510            credentialsSecret: my-cluster-name-backup-s3
   511            region: us-west-2
   512        fs-pvc:
   513          type: filesystem
   514  #        nodeSelector:
   515  #          storage: tape
   516  #          backupWorker: 'True'
   517  #        resources:
   518  #          requests:
   519  #            memory: 1G
   520  #            cpu: 600m
   521  #        affinity:
   522  #          nodeAffinity:
   523  #            requiredDuringSchedulingIgnoredDuringExecution:
   524  #              nodeSelectorTerms:
   525  #              - matchExpressions:
   526  #                - key: backupWorker
   527  #                  operator: In
   528  #                  values:
   529  #                  - 'True'
   530  #        tolerations:
   531  #          - key: "backupWorker"
   532  #            operator: "Equal"
   533  #            value: "True"
   534  #            effect: "NoSchedule"
   535  #        annotations:
   536  #          testName: scheduled-backup
   537  #        labels:
   538  #          backupWorker: 'True'
   539  #        schedulerName: 'default-scheduler'
   540  #        priorityClassName: 'high-priority'
   541  #        containerSecurityContext:
   542  #          privileged: true
   543  #        podSecurityContext:
   544  #          fsGroup: 1001
   545  #          supplementalGroups: [1001, 1002, 1003]
   546          volume:
   547            persistentVolumeClaim:
   548  #            storageClassName: standard
   549              accessModes: [ "ReadWriteOnce" ]
   550              resources:
   551                requests:
   552                  storage: 6G
   553      schedule:
   554        - name: "sat-night-backup"
   555          schedule: "0 0 * * 6"
   556          keep: 3
   557          storageName: s3-us-west
   558        - name: "daily-backup"
   559          schedule: "0 0 * * *"
   560          keep: 5
   561          storageName: fs-pvc