github.com/oam-dev/kubevela@v1.9.11/pkg/appfile/testdata/backport-1-2/apprev1.yaml

     1  apiVersion: core.oam.dev/v1beta1
     2  kind: ApplicationRevision
     3  metadata:
     4    annotations:
     5      app.oam.dev/publishVersion: workflow-default-123456
     6    name: backport-1-2-test-demo-v1
     7    namespace: default
     8  spec:
     9    application:
    10      apiVersion: core.oam.dev/v1beta1
    11      kind: Application
    12      metadata:
    13        annotations:
    14          app.oam.dev/publishVersion: workflow-default-123456
    15        name: backport-1-2-test-demo
    16        namespace: default
    17      spec:
    18        components:
    19          - name: backport-1-2-test-demo
    20            properties:
    21              image: nginx
    22            traits:
    23              - properties:
    24                  replicas: 1
    25                type: scaler
    26            type: webservice
    27        workflow:
    28          steps:
    29            - name: apply
    30              type: apply-application
    31      status: {}
    32    componentDefinitions:
    33      webservice:
    34        apiVersion: core.oam.dev/v1beta1
    35        kind: ComponentDefinition
    36        metadata:
    37          annotations:
    38            definition.oam.dev/description: Describes long-running, scalable, containerized
    39              services that have a stable network endpoint to receive external network
    40              traffic from customers.
    41            meta.helm.sh/release-name: kubevela
    42            meta.helm.sh/release-namespace: vela-system
    43          labels:
    44            app.kubernetes.io/managed-by: Helm
    45          name: webservice
    46          namespace: vela-system
    47        spec:
    48          schematic:
    49            cue:
    50              template: "import (\n\t\"strconv\"\n)\n\nmountsArray: {\n\tpvc: *[\n\t\tfor
    51                v in parameter.volumeMounts.pvc {\n\t\t\t{\n\t\t\t\tmountPath: v.mountPath\n\t\t\t\tname:
    52                \     v.name\n\t\t\t}\n\t\t},\n\t] | []\n\n\tconfigMap: *[\n\t\t\tfor
    53                v in parameter.volumeMounts.configMap {\n\t\t\t{\n\t\t\t\tmountPath:
    54                v.mountPath\n\t\t\t\tname:      v.name\n\t\t\t}\n\t\t},\n\t] | []\n\n\tsecret:
    55                *[\n\t\tfor v in parameter.volumeMounts.secret {\n\t\t\t{\n\t\t\t\tmountPath:
    56                v.mountPath\n\t\t\t\tname:      v.name\n\t\t\t}\n\t\t},\n\t] | []\n\n\temptyDir:
    57                *[\n\t\t\tfor v in parameter.volumeMounts.emptyDir {\n\t\t\t{\n\t\t\t\tmountPath:
    58                v.mountPath\n\t\t\t\tname:      v.name\n\t\t\t}\n\t\t},\n\t] | []\n\n\thostPath:
    59                *[\n\t\t\tfor v in parameter.volumeMounts.hostPath {\n\t\t\t{\n\t\t\t\tmountPath:
    60                v.mountPath\n\t\t\t\tname:      v.name\n\t\t\t}\n\t\t},\n\t] | []\n}\nvolumesArray:
    61                {\n\tpvc: *[\n\t\tfor v in parameter.volumeMounts.pvc {\n\t\t\t{\n\t\t\t\tname:
    62                v.name\n\t\t\t\tpersistentVolumeClaim: claimName: v.claimName\n\t\t\t}\n\t\t},\n\t]
    63                | []\n\n\tconfigMap: *[\n\t\t\tfor v in parameter.volumeMounts.configMap
    64                {\n\t\t\t{\n\t\t\t\tname: v.name\n\t\t\t\tconfigMap: {\n\t\t\t\t\tdefaultMode:
    65                v.defaultMode\n\t\t\t\t\tname:        v.cmName\n\t\t\t\t\tif v.items
    66                != _|_ {\n\t\t\t\t\t\titems: v.items\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t]
    67                | []\n\n\tsecret: *[\n\t\tfor v in parameter.volumeMounts.secret {\n\t\t\t{\n\t\t\t\tname:
    68                v.name\n\t\t\t\tsecret: {\n\t\t\t\t\tdefaultMode: v.defaultMode\n\t\t\t\t\tsecretName:
    69                \ v.secretName\n\t\t\t\t\tif v.items != _|_ {\n\t\t\t\t\t\titems: v.items\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t]
    70                | []\n\n\temptyDir: *[\n\t\t\tfor v in parameter.volumeMounts.emptyDir
    71                {\n\t\t\t{\n\t\t\t\tname: v.name\n\t\t\t\temptyDir: medium: v.medium\n\t\t\t}\n\t\t},\n\t]
    72                | []\n\n\thostPath: *[\n\t\t\tfor v in parameter.volumeMounts.hostPath
    73                {\n\t\t\t{\n\t\t\t\tname: v.name\n\t\t\t\thostPath: path: v.path\n\t\t\t}\n\t\t},\n\t]
    74                | []\n}\noutput: {\n\tapiVersion: \"apps/v1\"\n\tkind:       \"Deployment\"\n\tspec:
    75                {\n\t\tselector: matchLabels: \"app.oam.dev/component\": context.name\n\n\t\ttemplate:
    76                {\n\t\t\tmetadata: {\n\t\t\t\tlabels: {\n\t\t\t\t\tif parameter.labels
    77                != _|_ {\n\t\t\t\t\t\tparameter.labels\n\t\t\t\t\t}\n\t\t\t\t\tif parameter.addRevisionLabel
    78                {\n\t\t\t\t\t\t\"app.oam.dev/revision\": context.revision\n\t\t\t\t\t}\n\t\t\t\t\t\"app.oam.dev/component\":
    79                context.name\n\t\t\t\t}\n\t\t\t\tif parameter.annotations != _|_ {\n\t\t\t\t\tannotations:
    80                parameter.annotations\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tspec: {\n\t\t\t\tcontainers:
    81                [{\n\t\t\t\t\tname:  context.name\n\t\t\t\t\timage: parameter.image\n\t\t\t\t\tif
    82                parameter[\"port\"] != _|_ && parameter[\"ports\"] == _|_ {\n\t\t\t\t\t\tports:
    83                [{\n\t\t\t\t\t\t\tcontainerPort: parameter.port\n\t\t\t\t\t\t}]\n\t\t\t\t\t}\n\t\t\t\t\tif
    84                parameter[\"ports\"] != _|_ {\n\t\t\t\t\t\tports: [ for v in parameter.ports
    85                {\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tcontainerPort: v.port\n\t\t\t\t\t\t\t\tprotocol:
    86                \     v.protocol\n\t\t\t\t\t\t\t\tif v.name != _|_ {\n\t\t\t\t\t\t\t\t\tname:
    87                v.name\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif v.name == _|_ {\n\t\t\t\t\t\t\t\t\tname:
    88                \"port-\" + strconv.FormatInt(v.port, 10)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}}]\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    89                parameter[\"imagePullPolicy\"] != _|_ {\n\t\t\t\t\t\timagePullPolicy:
    90                parameter.imagePullPolicy\n\t\t\t\t\t}\n\n\t\t\t\t\tif parameter[\"cmd\"]
    91                != _|_ {\n\t\t\t\t\t\tcommand: parameter.cmd\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    92                parameter[\"env\"] != _|_ {\n\t\t\t\t\t\tenv: parameter.env\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    93                context[\"config\"] != _|_ {\n\t\t\t\t\t\tenv: context.config\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    94                parameter[\"cpu\"] != _|_ {\n\t\t\t\t\t\tresources: {\n\t\t\t\t\t\t\tlimits:
    95                cpu:   parameter.cpu\n\t\t\t\t\t\t\trequests: cpu: parameter.cpu\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    96                parameter[\"memory\"] != _|_ {\n\t\t\t\t\t\tresources: {\n\t\t\t\t\t\t\tlimits:
    97                memory:   parameter.memory\n\t\t\t\t\t\t\trequests: memory: parameter.memory\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    98                parameter[\"volumes\"] != _|_ && parameter[\"volumeMounts\"] == _|_
    99                {\n\t\t\t\t\t\tvolumeMounts: [ for v in parameter.volumes {\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tmountPath:
   100                v.mountPath\n\t\t\t\t\t\t\t\tname:      v.name\n\t\t\t\t\t\t\t}}]\n\t\t\t\t\t}\n\n\t\t\t\t\tif
   101                parameter[\"volumeMounts\"] != _|_ {\n\t\t\t\t\t\tvolumeMounts: mountsArray.pvc
   102                + mountsArray.configMap + mountsArray.secret + mountsArray.emptyDir
   103                + mountsArray.hostPath\n\t\t\t\t\t}\n\n\t\t\t\t\tif parameter[\"livenessProbe\"]
   104                != _|_ {\n\t\t\t\t\t\tlivenessProbe: parameter.livenessProbe\n\t\t\t\t\t}\n\n\t\t\t\t\tif
   105                parameter[\"readinessProbe\"] != _|_ {\n\t\t\t\t\t\treadinessProbe:
   106                parameter.readinessProbe\n\t\t\t\t\t}\n\n\t\t\t\t}]\n\n\t\t\t\tif parameter[\"hostAliases\"]
   107                != _|_ {\n\t\t\t\t\t// +patchKey=ip\n\t\t\t\t\thostAliases: parameter.hostAliases\n\t\t\t\t}\n\n\t\t\t\tif
   108                parameter[\"imagePullSecrets\"] != _|_ {\n\t\t\t\t\timagePullSecrets:
   109                [ for v in parameter.imagePullSecrets {\n\t\t\t\t\t\tname: v\n\t\t\t\t\t},\n\t\t\t\t\t]\n\t\t\t\t}\n\n\t\t\t\tif
   110                parameter[\"volumes\"] != _|_ && parameter[\"volumeMounts\"] == _|_
   111                {\n\t\t\t\t\tvolumes: [ for v in parameter.volumes {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tname:
   112                v.name\n\t\t\t\t\t\t\tif v.type == \"pvc\" {\n\t\t\t\t\t\t\t\tpersistentVolumeClaim:
   113                claimName: v.claimName\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif v.type ==
   114                \"configMap\" {\n\t\t\t\t\t\t\t\tconfigMap: {\n\t\t\t\t\t\t\t\t\tdefaultMode:
   115                v.defaultMode\n\t\t\t\t\t\t\t\t\tname:        v.cmName\n\t\t\t\t\t\t\t\t\tif
   116                v.items != _|_ {\n\t\t\t\t\t\t\t\t\t\titems: v.items\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif
   117                v.type == \"secret\" {\n\t\t\t\t\t\t\t\tsecret: {\n\t\t\t\t\t\t\t\t\tdefaultMode:
   118                v.defaultMode\n\t\t\t\t\t\t\t\t\tsecretName:  v.secretName\n\t\t\t\t\t\t\t\t\tif
   119                v.items != _|_ {\n\t\t\t\t\t\t\t\t\t\titems: v.items\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif
   120                v.type == \"emptyDir\" {\n\t\t\t\t\t\t\t\temptyDir: medium: v.medium\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}]\n\t\t\t\t}\n\n\t\t\t\tif
   121                parameter[\"volumeMounts\"] != _|_ {\n\t\t\t\t\tvolumes: volumesArray.pvc
   122                + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir
   123                + volumesArray.hostPath\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\nexposePorts:
   124                [\n\tfor v in parameter.ports if v.expose == true {\n\t\tport:       v.port\n\t\ttargetPort:
   125                v.port\n\t\tif v.name != _|_ {\n\t\t\tname: v.name\n\t\t}\n\t\tif v.name
   126                == _|_ {\n\t\t\tname: \"port-\" + strconv.FormatInt(v.port, 10)\n\t\t}\n\t},\n]\noutputs:
   127                {\n\tif len(exposePorts) != 0 {\n\t\twebserviceExpose: {\n\t\t\tapiVersion:
   128                \"v1\"\n\t\t\tkind:       \"Service\"\n\t\t\tmetadata: name: context.name\n\t\t\tspec:
   129                {\n\t\t\t\tselector: \"app.oam.dev/component\": context.name\n\t\t\t\tports:
   130                exposePorts\n\t\t\t\ttype:  parameter.exposeType\n\t\t\t}\n\t\t}\n\t}\n}\nparameter:
   131                {\n\t// +usage=Specify the labels in the workload\n\tlabels?: [string]:
   132                string\n\n\t// +usage=Specify the annotations in the workload\n\tannotations?:
   133                [string]: string\n\n\t// +usage=Which image would you like to use for
   134                your service\n\t// +short=i\n\timage: string\n\n\t// +usage=Specify
   135                image pull policy for your service\n\timagePullPolicy?: \"Always\" |
   136                \"Never\" | \"IfNotPresent\"\n\n\t// +usage=Specify image pull secrets
   137                for your service\n\timagePullSecrets?: [...string]\n\n\t// +ignore\n\t//
   138                +usage=Deprecated field, please use ports instead\n\t// +short=p\n\tport?:
   139                int\n\n\t// +usage=Which ports do you want customer traffic sent to,
   140                defaults to 80\n\tports?: [...{\n\t\t// +usage=Number of port to expose
   141                on the pod's IP address\n\t\tport: int\n\t\t// +usage=Name of the port\n\t\tname?:
   142                string\n\t\t// +usage=Protocol for port. Must be UDP, TCP, or SCTP\n\t\tprotocol:
   143                *\"TCP\" | \"UDP\" | \"SCTP\"\n\t\t// +usage=Specify if the port should
   144                be exposed\n\t\texpose: *false | bool\n\t}]\n\n\t// +ignore\n\t// +usage=Specify
   145                what kind of Service you want. options: \"ClusterIP\", \"NodePort\",
   146                \"LoadBalancer\", \"ExternalName\"\n\texposeType: *\"ClusterIP\" | \"NodePort\"
   147                | \"LoadBalancer\" | \"ExternalName\"\n\n\t// +ignore\n\t// +usage=If
   148                addRevisionLabel is true, the revision label will be added to the underlying
   149                pods\n\taddRevisionLabel: *false | bool\n\n\t// +usage=Commands to run
   150                in the container\n\tcmd?: [...string]\n\n\t// +usage=Define arguments
   151                by using environment variables\n\tenv?: [...{\n\t\t// +usage=Environment
   152                variable name\n\t\tname: string\n\t\t// +usage=The value of the environment
   153                variable\n\t\tvalue?: string\n\t\t// +usage=Specifies a source the value
   154                of this var should come from\n\t\tvalueFrom?: {\n\t\t\t// +usage=Selects
   155                a key of a secret in the pod's namespace\n\t\t\tsecretKeyRef?: {\n\t\t\t\t//
   156                +usage=The name of the secret in the pod's namespace to select from\n\t\t\t\tname:
   157                string\n\t\t\t\t// +usage=The key of the secret to select from. Must
   158                be a valid secret key\n\t\t\t\tkey: string\n\t\t\t}\n\t\t\t// +usage=Selects
   159                a key of a config map in the pod's namespace\n\t\t\tconfigMapKeyRef?:
   160                {\n\t\t\t\t// +usage=The name of the config map in the pod's namespace
   161                to select from\n\t\t\t\tname: string\n\t\t\t\t// +usage=The key of the
   162                config map to select from. Must be a valid secret key\n\t\t\t\tkey:
   163                string\n\t\t\t}\n\t\t}\n\t}]\n\n\t// +usage=Number of CPU units for
    164                the service, like `0.5` (0.5 CPU core), `1` (1 CPU core)\n\tcpu?: string\n\n\t//
   165                +usage=Specifies the attributes of the memory resource required for
   166                the container.\n\tmemory?: string\n\n\tvolumeMounts?: {\n\t\t// +usage=Mount
   167                PVC type volume\n\t\tpvc?: [...{\n\t\t\tname:      string\n\t\t\tmountPath:
   168                string\n\t\t\t// +usage=The name of the PVC\n\t\t\tclaimName: string\n\t\t}]\n\t\t//
   169                +usage=Mount ConfigMap type volume\n\t\tconfigMap?: [...{\n\t\t\tname:
   170                \       string\n\t\t\tmountPath:   string\n\t\t\tdefaultMode: *420 |
   171                int\n\t\t\tcmName:      string\n\t\t\titems?: [...{\n\t\t\t\tkey:  string\n\t\t\t\tpath:
   172                string\n\t\t\t\tmode: *511 | int\n\t\t\t}]\n\t\t}]\n\t\t// +usage=Mount
   173                Secret type volume\n\t\tsecret?: [...{\n\t\t\tname:        string\n\t\t\tmountPath:
   174                \  string\n\t\t\tdefaultMode: *420 | int\n\t\t\tsecretName:  string\n\t\t\titems?:
   175                [...{\n\t\t\t\tkey:  string\n\t\t\t\tpath: string\n\t\t\t\tmode: *511
   176                | int\n\t\t\t}]\n\t\t}]\n\t\t// +usage=Mount EmptyDir type volume\n\t\temptyDir?:
   177                [...{\n\t\t\tname:      string\n\t\t\tmountPath: string\n\t\t\tmedium:
   178                \   *\"\" | \"Memory\"\n\t\t}]\n\t\t// +usage=Mount HostPath type volume\n\t\thostPath?:
   179                [...{\n\t\t\tname:      string\n\t\t\tmountPath: string\n\t\t\tpath:
   180                \     string\n\t\t}]\n\t}\n\n\t// +usage=Deprecated field, use volumeMounts
   181                instead.\n\tvolumes?: [...{\n\t\tname:      string\n\t\tmountPath: string\n\t\t//
   182                +usage=Specify volume type, options: \"pvc\",\"configMap\",\"secret\",\"emptyDir\"\n\t\ttype:
   183                \"pvc\" | \"configMap\" | \"secret\" | \"emptyDir\"\n\t\tif type ==
   184                \"pvc\" {\n\t\t\tclaimName: string\n\t\t}\n\t\tif type == \"configMap\"
   185                {\n\t\t\tdefaultMode: *420 | int\n\t\t\tcmName:      string\n\t\t\titems?:
   186                [...{\n\t\t\t\tkey:  string\n\t\t\t\tpath: string\n\t\t\t\tmode: *511
   187                | int\n\t\t\t}]\n\t\t}\n\t\tif type == \"secret\" {\n\t\t\tdefaultMode:
   188                *420 | int\n\t\t\tsecretName:  string\n\t\t\titems?: [...{\n\t\t\t\tkey:
   189                \ string\n\t\t\t\tpath: string\n\t\t\t\tmode: *511 | int\n\t\t\t}]\n\t\t}\n\t\tif
   190                type == \"emptyDir\" {\n\t\t\tmedium: *\"\" | \"Memory\"\n\t\t}\n\t}]\n\n\t//
   191                +usage=Instructions for assessing whether the container is alive.\n\tlivenessProbe?:
   192                #HealthProbe\n\n\t// +usage=Instructions for assessing whether the container
   193                is in a suitable state to serve traffic.\n\treadinessProbe?: #HealthProbe\n\n\t//
   194                +usage=Specify the hostAliases to add\n\thostAliases?: [...{\n\t\tip:
   195                string\n\t\thostnames: [...string]\n\t}]\n}\n#HealthProbe: {\n\n\t//
   196                +usage=Instructions for assessing container health by executing a command.
   197                Either this attribute or the httpGet attribute or the tcpSocket attribute
   198                MUST be specified. This attribute is mutually exclusive with both the
   199                httpGet attribute and the tcpSocket attribute.\n\texec?: {\n\t\t// +usage=A
   200                command to be executed inside the container to assess its health. Each
   201                space delimited token of the command is a separate array element. Commands
   202                exiting 0 are considered to be successful probes, whilst all other exit
   203                codes are considered failures.\n\t\tcommand: [...string]\n\t}\n\n\t//
   204                +usage=Instructions for assessing container health by executing an HTTP
   205                GET request. Either this attribute or the exec attribute or the tcpSocket
   206                attribute MUST be specified. This attribute is mutually exclusive with
   207                both the exec attribute and the tcpSocket attribute.\n\thttpGet?: {\n\t\t//
   208                +usage=The endpoint, relative to the port, to which the HTTP GET request
   209                should be directed.\n\t\tpath: string\n\t\t// +usage=The TCP socket
   210                within the container to which the HTTP GET request should be directed.\n\t\tport:
   211                int\n\t\thttpHeaders?: [...{\n\t\t\tname:  string\n\t\t\tvalue: string\n\t\t}]\n\t}\n\n\t//
   212                +usage=Instructions for assessing container health by probing a TCP
   213                socket. Either this attribute or the exec attribute or the httpGet attribute
   214                MUST be specified. This attribute is mutually exclusive with both the
   215                exec attribute and the httpGet attribute.\n\ttcpSocket?: {\n\t\t// +usage=The
   216                TCP socket within the container that should be probed to assess container
   217                health.\n\t\tport: int\n\t}\n\n\t// +usage=Number of seconds after the
   218                container is started before the first probe is initiated.\n\tinitialDelaySeconds:
   219                *0 | int\n\n\t// +usage=How often, in seconds, to execute the probe.\n\tperiodSeconds:
   220                *10 | int\n\n\t// +usage=Number of seconds after which the probe times
   221                out.\n\ttimeoutSeconds: *1 | int\n\n\t// +usage=Minimum consecutive
   222                successes for the probe to be considered successful after having failed.\n\tsuccessThreshold:
   223                *1 | int\n\n\t// +usage=Number of consecutive failures required to determine
   224                the container is not alive (liveness probe) or not ready (readiness
   225                probe).\n\tfailureThreshold: *3 | int\n}\n"
   226          status:
   227            customStatus: "ready: {\n\treadyReplicas: *0 | int\n} & {\n\tif context.output.status.readyReplicas
   228              != _|_ {\n\t\treadyReplicas: context.output.status.readyReplicas\n\t}\n}\nmessage:
   229              \"Ready:\\(ready.readyReplicas)/\\(context.output.spec.replicas)\""
   230            healthPolicy: "ready: {\n\tupdatedReplicas:    *0 | int\n\treadyReplicas:
   231              \     *0 | int\n\treplicas:           *0 | int\n\tobservedGeneration:
   232              *0 | int\n} & {\n\tif context.output.status.updatedReplicas != _|_ {\n\t\tupdatedReplicas:
   233              context.output.status.updatedReplicas\n\t}\n\tif context.output.status.readyReplicas
   234              != _|_ {\n\t\treadyReplicas: context.output.status.readyReplicas\n\t}\n\tif
   235              context.output.status.replicas != _|_ {\n\t\treplicas: context.output.status.replicas\n\t}\n\tif
   236              context.output.status.observedGeneration != _|_ {\n\t\tobservedGeneration:
   237              context.output.status.observedGeneration\n\t}\n}\nisHealth: (context.output.spec.replicas
   238              == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas)
   239              && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration
   240              == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)"
   241          workload:
   242            definition:
   243              apiVersion: apps/v1
   244              kind: Deployment
   245            type: deployments.apps
   246        status: {}
   247    traitDefinitions:
   248      scaler:
   249        apiVersion: core.oam.dev/v1beta1
   250        kind: TraitDefinition
   251        metadata:
   252          annotations:
   253            definition.oam.dev/description: Manually scale K8s pod for your workload
   254              which follows the pod spec in path 'spec.template'.
   255            meta.helm.sh/release-name: kubevela
   256            meta.helm.sh/release-namespace: vela-system
   257          labels:
   258            app.kubernetes.io/managed-by: Helm
   259          name: scaler
   260          namespace: vela-system
   261        spec:
   262          appliesToWorkloads:
   263            - '*'
   264          definitionRef:
   265            name: ""
   266          schematic:
   267            cue:
   268              template: "parameter: {\n\t// +usage=Specify the number of workload\n\treplicas:
   269                *1 | int\n}\n// +patchStrategy=retainKeys\npatch: spec: replicas: parameter.replicas\n"
   270        status: {}
   271  status: {}
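
For orientation, below is a minimal, hypothetical sketch of how a Go test in pkg/appfile could load and sanity-check this fixture. The package name, test name, and the use of a generic map (rather than the typed KubeVela API structs) are assumptions made for illustration only; they are not the repository's actual test code.

// Illustrative only: decode the fixture into a generic map so the sketch
// does not assume any particular Go struct layout.
package appfile_test

import (
	"os"
	"testing"

	"sigs.k8s.io/yaml"
)

func TestLoadBackport12AppRevFixture(t *testing.T) {
	// Path is relative to pkg/appfile, where the testdata directory lives.
	data, err := os.ReadFile("testdata/backport-1-2/apprev1.yaml")
	if err != nil {
		t.Fatalf("read fixture: %v", err)
	}

	var rev map[string]interface{}
	if err := yaml.Unmarshal(data, &rev); err != nil {
		t.Fatalf("unmarshal fixture: %v", err)
	}

	// Sanity-check the two fields the fixture declares at its top level.
	if rev["kind"] != "ApplicationRevision" {
		t.Fatalf("unexpected kind: %v", rev["kind"])
	}
	if rev["apiVersion"] != "core.oam.dev/v1beta1" {
		t.Fatalf("unexpected apiVersion: %v", rev["apiVersion"])
	}
}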