github.com/oam-dev/kubevela@v1.9.11/pkg/appfile/testdata/backport-1-2/apprev2.yaml

     1  apiVersion: core.oam.dev/v1beta1
     2  kind: ApplicationRevision
     3  metadata:
     4    annotations:
     5      app.oam.dev/publishVersion: workflow-default-123456
     6    name: backport-1-2-test-demo-v1
     7    namespace: default
     8  spec:
     9    application:
    10      apiVersion: core.oam.dev/v1beta1
    11      kind: Application
    12      metadata:
    13        annotations:
    14          app.oam.dev/publishVersion: workflow-default-123456
    15        name: backport-1-2-test-demo
    16        namespace: default
    17      spec:
    18        components:
    19          - name: backport-1-2-test-demo
    20            properties:
    21              image: nginx
    22            traits:
    23              - properties:
    24                  replicas: 1
    25                type: scaler
    26            type: webservice
    27        workflow:
    28          steps:
    29            - name: apply-component
    30              type: apply-component
    31              properties:
    32                name: backport-1-2-test-demo
    33            - name: apply1
    34              type: apply-application
    35            - name: apply2
    36              type: apply-application
    37      status: {}
    38    componentDefinitions:
    39      webservice:
    40        apiVersion: core.oam.dev/v1beta1
    41        kind: ComponentDefinition
    42        metadata:
    43          annotations:
    44            definition.oam.dev/description: Describes long-running, scalable, containerized
    45              services that have a stable network endpoint to receive external network
    46              traffic from customers.
    47            meta.helm.sh/release-name: kubevela
    48            meta.helm.sh/release-namespace: vela-system
    49          labels:
    50            app.kubernetes.io/managed-by: Helm
    51          name: webservice
    52          namespace: vela-system
    53        spec:
    54          schematic:
    55            cue:
    56              template: "import (\n\t\"strconv\"\n)\n\nmountsArray: {\n\tpvc: *[\n\t\tfor
    57                v in parameter.volumeMounts.pvc {\n\t\t\t{\n\t\t\t\tmountPath: v.mountPath\n\t\t\t\tname:
    58                \     v.name\n\t\t\t}\n\t\t},\n\t] | []\n\n\tconfigMap: *[\n\t\t\tfor
    59                v in parameter.volumeMounts.configMap {\n\t\t\t{\n\t\t\t\tmountPath:
    60                v.mountPath\n\t\t\t\tname:      v.name\n\t\t\t}\n\t\t},\n\t] | []\n\n\tsecret:
    61                *[\n\t\tfor v in parameter.volumeMounts.secret {\n\t\t\t{\n\t\t\t\tmountPath:
    62                v.mountPath\n\t\t\t\tname:      v.name\n\t\t\t}\n\t\t},\n\t] | []\n\n\temptyDir:
    63                *[\n\t\t\tfor v in parameter.volumeMounts.emptyDir {\n\t\t\t{\n\t\t\t\tmountPath:
    64                v.mountPath\n\t\t\t\tname:      v.name\n\t\t\t}\n\t\t},\n\t] | []\n\n\thostPath:
    65                *[\n\t\t\tfor v in parameter.volumeMounts.hostPath {\n\t\t\t{\n\t\t\t\tmountPath:
    66                v.mountPath\n\t\t\t\tname:      v.name\n\t\t\t}\n\t\t},\n\t] | []\n}\nvolumesArray:
    67                {\n\tpvc: *[\n\t\tfor v in parameter.volumeMounts.pvc {\n\t\t\t{\n\t\t\t\tname:
    68                v.name\n\t\t\t\tpersistentVolumeClaim: claimName: v.claimName\n\t\t\t}\n\t\t},\n\t]
    69                | []\n\n\tconfigMap: *[\n\t\t\tfor v in parameter.volumeMounts.configMap
    70                {\n\t\t\t{\n\t\t\t\tname: v.name\n\t\t\t\tconfigMap: {\n\t\t\t\t\tdefaultMode:
    71                v.defaultMode\n\t\t\t\t\tname:        v.cmName\n\t\t\t\t\tif v.items
    72                != _|_ {\n\t\t\t\t\t\titems: v.items\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t]
    73                | []\n\n\tsecret: *[\n\t\tfor v in parameter.volumeMounts.secret {\n\t\t\t{\n\t\t\t\tname:
    74                v.name\n\t\t\t\tsecret: {\n\t\t\t\t\tdefaultMode: v.defaultMode\n\t\t\t\t\tsecretName:
    75                \ v.secretName\n\t\t\t\t\tif v.items != _|_ {\n\t\t\t\t\t\titems: v.items\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t]
    76                | []\n\n\temptyDir: *[\n\t\t\tfor v in parameter.volumeMounts.emptyDir
    77                {\n\t\t\t{\n\t\t\t\tname: v.name\n\t\t\t\temptyDir: medium: v.medium\n\t\t\t}\n\t\t},\n\t]
    78                | []\n\n\thostPath: *[\n\t\t\tfor v in parameter.volumeMounts.hostPath
    79                {\n\t\t\t{\n\t\t\t\tname: v.name\n\t\t\t\thostPath: path: v.path\n\t\t\t}\n\t\t},\n\t]
    80                | []\n}\noutput: {\n\tapiVersion: \"apps/v1\"\n\tkind:       \"Deployment\"\n\tspec:
    81                {\n\t\tselector: matchLabels: \"app.oam.dev/component\": context.name\n\n\t\ttemplate:
    82                {\n\t\t\tmetadata: {\n\t\t\t\tlabels: {\n\t\t\t\t\tif parameter.labels
    83                != _|_ {\n\t\t\t\t\t\tparameter.labels\n\t\t\t\t\t}\n\t\t\t\t\tif parameter.addRevisionLabel
    84                {\n\t\t\t\t\t\t\"app.oam.dev/revision\": context.revision\n\t\t\t\t\t}\n\t\t\t\t\t\"app.oam.dev/component\":
    85                context.name\n\t\t\t\t}\n\t\t\t\tif parameter.annotations != _|_ {\n\t\t\t\t\tannotations:
    86                parameter.annotations\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tspec: {\n\t\t\t\tcontainers:
    87                [{\n\t\t\t\t\tname:  context.name\n\t\t\t\t\timage: parameter.image\n\t\t\t\t\tif
    88                parameter[\"port\"] != _|_ && parameter[\"ports\"] == _|_ {\n\t\t\t\t\t\tports:
    89                [{\n\t\t\t\t\t\t\tcontainerPort: parameter.port\n\t\t\t\t\t\t}]\n\t\t\t\t\t}\n\t\t\t\t\tif
    90                parameter[\"ports\"] != _|_ {\n\t\t\t\t\t\tports: [ for v in parameter.ports
    91                {\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tcontainerPort: v.port\n\t\t\t\t\t\t\t\tprotocol:
    92                \     v.protocol\n\t\t\t\t\t\t\t\tif v.name != _|_ {\n\t\t\t\t\t\t\t\t\tname:
    93                v.name\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif v.name == _|_ {\n\t\t\t\t\t\t\t\t\tname:
    94                \"port-\" + strconv.FormatInt(v.port, 10)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}}]\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    95                parameter[\"imagePullPolicy\"] != _|_ {\n\t\t\t\t\t\timagePullPolicy:
    96                parameter.imagePullPolicy\n\t\t\t\t\t}\n\n\t\t\t\t\tif parameter[\"cmd\"]
    97                != _|_ {\n\t\t\t\t\t\tcommand: parameter.cmd\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    98                parameter[\"env\"] != _|_ {\n\t\t\t\t\t\tenv: parameter.env\n\t\t\t\t\t}\n\n\t\t\t\t\tif
    99                context[\"config\"] != _|_ {\n\t\t\t\t\t\tenv: context.config\n\t\t\t\t\t}\n\n\t\t\t\t\tif
   100                parameter[\"cpu\"] != _|_ {\n\t\t\t\t\t\tresources: {\n\t\t\t\t\t\t\tlimits:
   101                cpu:   parameter.cpu\n\t\t\t\t\t\t\trequests: cpu: parameter.cpu\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif
   102                parameter[\"memory\"] != _|_ {\n\t\t\t\t\t\tresources: {\n\t\t\t\t\t\t\tlimits:
   103                memory:   parameter.memory\n\t\t\t\t\t\t\trequests: memory: parameter.memory\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif
   104                parameter[\"volumes\"] != _|_ && parameter[\"volumeMounts\"] == _|_
   105                {\n\t\t\t\t\t\tvolumeMounts: [ for v in parameter.volumes {\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tmountPath:
   106                v.mountPath\n\t\t\t\t\t\t\t\tname:      v.name\n\t\t\t\t\t\t\t}}]\n\t\t\t\t\t}\n\n\t\t\t\t\tif
   107                parameter[\"volumeMounts\"] != _|_ {\n\t\t\t\t\t\tvolumeMounts: mountsArray.pvc
   108                + mountsArray.configMap + mountsArray.secret + mountsArray.emptyDir
   109                + mountsArray.hostPath\n\t\t\t\t\t}\n\n\t\t\t\t\tif parameter[\"livenessProbe\"]
   110                != _|_ {\n\t\t\t\t\t\tlivenessProbe: parameter.livenessProbe\n\t\t\t\t\t}\n\n\t\t\t\t\tif
   111                parameter[\"readinessProbe\"] != _|_ {\n\t\t\t\t\t\treadinessProbe:
   112                parameter.readinessProbe\n\t\t\t\t\t}\n\n\t\t\t\t}]\n\n\t\t\t\tif parameter[\"hostAliases\"]
   113                != _|_ {\n\t\t\t\t\t// +patchKey=ip\n\t\t\t\t\thostAliases: parameter.hostAliases\n\t\t\t\t}\n\n\t\t\t\tif
   114                parameter[\"imagePullSecrets\"] != _|_ {\n\t\t\t\t\timagePullSecrets:
   115                [ for v in parameter.imagePullSecrets {\n\t\t\t\t\t\tname: v\n\t\t\t\t\t},\n\t\t\t\t\t]\n\t\t\t\t}\n\n\t\t\t\tif
   116                parameter[\"volumes\"] != _|_ && parameter[\"volumeMounts\"] == _|_
   117                {\n\t\t\t\t\tvolumes: [ for v in parameter.volumes {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tname:
   118                v.name\n\t\t\t\t\t\t\tif v.type == \"pvc\" {\n\t\t\t\t\t\t\t\tpersistentVolumeClaim:
   119                claimName: v.claimName\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif v.type ==
   120                \"configMap\" {\n\t\t\t\t\t\t\t\tconfigMap: {\n\t\t\t\t\t\t\t\t\tdefaultMode:
   121                v.defaultMode\n\t\t\t\t\t\t\t\t\tname:        v.cmName\n\t\t\t\t\t\t\t\t\tif
   122                v.items != _|_ {\n\t\t\t\t\t\t\t\t\t\titems: v.items\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif
   123                v.type == \"secret\" {\n\t\t\t\t\t\t\t\tsecret: {\n\t\t\t\t\t\t\t\t\tdefaultMode:
   124                v.defaultMode\n\t\t\t\t\t\t\t\t\tsecretName:  v.secretName\n\t\t\t\t\t\t\t\t\tif
   125                v.items != _|_ {\n\t\t\t\t\t\t\t\t\t\titems: v.items\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif
   126                v.type == \"emptyDir\" {\n\t\t\t\t\t\t\t\temptyDir: medium: v.medium\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}]\n\t\t\t\t}\n\n\t\t\t\tif
   127                parameter[\"volumeMounts\"] != _|_ {\n\t\t\t\t\tvolumes: volumesArray.pvc
   128                + volumesArray.configMap + volumesArray.secret + volumesArray.emptyDir
   129                + volumesArray.hostPath\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\nexposePorts:
   130                [\n\tfor v in parameter.ports if v.expose == true {\n\t\tport:       v.port\n\t\ttargetPort:
   131                v.port\n\t\tif v.name != _|_ {\n\t\t\tname: v.name\n\t\t}\n\t\tif v.name
   132                == _|_ {\n\t\t\tname: \"port-\" + strconv.FormatInt(v.port, 10)\n\t\t}\n\t},\n]\noutputs:
   133                {\n\tif len(exposePorts) != 0 {\n\t\twebserviceExpose: {\n\t\t\tapiVersion:
   134                \"v1\"\n\t\t\tkind:       \"Service\"\n\t\t\tmetadata: name: context.name\n\t\t\tspec:
   135                {\n\t\t\t\tselector: \"app.oam.dev/component\": context.name\n\t\t\t\tports:
   136                exposePorts\n\t\t\t\ttype:  parameter.exposeType\n\t\t\t}\n\t\t}\n\t}\n}\nparameter:
   137                {\n\t// +usage=Specify the labels in the workload\n\tlabels?: [string]:
   138                string\n\n\t// +usage=Specify the annotations in the workload\n\tannotations?:
   139                [string]: string\n\n\t// +usage=Which image would you like to use for
   140                your service\n\t// +short=i\n\timage: string\n\n\t// +usage=Specify
   141                image pull policy for your service\n\timagePullPolicy?: \"Always\" |
   142                \"Never\" | \"IfNotPresent\"\n\n\t// +usage=Specify image pull secrets
   143                for your service\n\timagePullSecrets?: [...string]\n\n\t// +ignore\n\t//
   144                +usage=Deprecated field, please use ports instead\n\t// +short=p\n\tport?:
   145                int\n\n\t// +usage=Which ports do you want customer traffic sent to,
   146                defaults to 80\n\tports?: [...{\n\t\t// +usage=Number of port to expose
   147                on the pod's IP address\n\t\tport: int\n\t\t// +usage=Name of the port\n\t\tname?:
   148                string\n\t\t// +usage=Protocol for port. Must be UDP, TCP, or SCTP\n\t\tprotocol:
   149                *\"TCP\" | \"UDP\" | \"SCTP\"\n\t\t// +usage=Specify if the port should
   150                be exposed\n\t\texpose: *false | bool\n\t}]\n\n\t// +ignore\n\t// +usage=Specify
   151                what kind of Service you want. options: \"ClusterIP\", \"NodePort\",
   152                \"LoadBalancer\", \"ExternalName\"\n\texposeType: *\"ClusterIP\" | \"NodePort\"
   153                | \"LoadBalancer\" | \"ExternalName\"\n\n\t// +ignore\n\t// +usage=If
   154                addRevisionLabel is true, the revision label will be added to the underlying
   155                pods\n\taddRevisionLabel: *false | bool\n\n\t// +usage=Commands to run
   156                in the container\n\tcmd?: [...string]\n\n\t// +usage=Define arguments
   157                by using environment variables\n\tenv?: [...{\n\t\t// +usage=Environment
   158                variable name\n\t\tname: string\n\t\t// +usage=The value of the environment
   159                variable\n\t\tvalue?: string\n\t\t// +usage=Specifies a source the value
   160                of this var should come from\n\t\tvalueFrom?: {\n\t\t\t// +usage=Selects
   161                a key of a secret in the pod's namespace\n\t\t\tsecretKeyRef?: {\n\t\t\t\t//
   162                +usage=The name of the secret in the pod's namespace to select from\n\t\t\t\tname:
   163                string\n\t\t\t\t// +usage=The key of the secret to select from. Must
   164                be a valid secret key\n\t\t\t\tkey: string\n\t\t\t}\n\t\t\t// +usage=Selects
   165                a key of a config map in the pod's namespace\n\t\t\tconfigMapKeyRef?:
   166                {\n\t\t\t\t// +usage=The name of the config map in the pod's namespace
   167                to select from\n\t\t\t\tname: string\n\t\t\t\t// +usage=The key of the
   168                config map to select from. Must be a valid secret key\n\t\t\t\tkey:
   169                string\n\t\t\t}\n\t\t}\n\t}]\n\n\t// +usage=Number of CPU units for
    170                the service, like `0.5` (0.5 CPU core), `1` (1 CPU core)\n\tcpu?: string\n\n\t//
   171                +usage=Specifies the attributes of the memory resource required for
   172                the container.\n\tmemory?: string\n\n\tvolumeMounts?: {\n\t\t// +usage=Mount
   173                PVC type volume\n\t\tpvc?: [...{\n\t\t\tname:      string\n\t\t\tmountPath:
   174                string\n\t\t\t// +usage=The name of the PVC\n\t\t\tclaimName: string\n\t\t}]\n\t\t//
   175                +usage=Mount ConfigMap type volume\n\t\tconfigMap?: [...{\n\t\t\tname:
   176                \       string\n\t\t\tmountPath:   string\n\t\t\tdefaultMode: *420 |
   177                int\n\t\t\tcmName:      string\n\t\t\titems?: [...{\n\t\t\t\tkey:  string\n\t\t\t\tpath:
   178                string\n\t\t\t\tmode: *511 | int\n\t\t\t}]\n\t\t}]\n\t\t// +usage=Mount
   179                Secret type volume\n\t\tsecret?: [...{\n\t\t\tname:        string\n\t\t\tmountPath:
   180                \  string\n\t\t\tdefaultMode: *420 | int\n\t\t\tsecretName:  string\n\t\t\titems?:
   181                [...{\n\t\t\t\tkey:  string\n\t\t\t\tpath: string\n\t\t\t\tmode: *511
   182                | int\n\t\t\t}]\n\t\t}]\n\t\t// +usage=Mount EmptyDir type volume\n\t\temptyDir?:
   183                [...{\n\t\t\tname:      string\n\t\t\tmountPath: string\n\t\t\tmedium:
   184                \   *\"\" | \"Memory\"\n\t\t}]\n\t\t// +usage=Mount HostPath type volume\n\t\thostPath?:
   185                [...{\n\t\t\tname:      string\n\t\t\tmountPath: string\n\t\t\tpath:
   186                \     string\n\t\t}]\n\t}\n\n\t// +usage=Deprecated field, use volumeMounts
   187                instead.\n\tvolumes?: [...{\n\t\tname:      string\n\t\tmountPath: string\n\t\t//
   188                +usage=Specify volume type, options: \"pvc\",\"configMap\",\"secret\",\"emptyDir\"\n\t\ttype:
   189                \"pvc\" | \"configMap\" | \"secret\" | \"emptyDir\"\n\t\tif type ==
   190                \"pvc\" {\n\t\t\tclaimName: string\n\t\t}\n\t\tif type == \"configMap\"
   191                {\n\t\t\tdefaultMode: *420 | int\n\t\t\tcmName:      string\n\t\t\titems?:
   192                [...{\n\t\t\t\tkey:  string\n\t\t\t\tpath: string\n\t\t\t\tmode: *511
   193                | int\n\t\t\t}]\n\t\t}\n\t\tif type == \"secret\" {\n\t\t\tdefaultMode:
   194                *420 | int\n\t\t\tsecretName:  string\n\t\t\titems?: [...{\n\t\t\t\tkey:
   195                \ string\n\t\t\t\tpath: string\n\t\t\t\tmode: *511 | int\n\t\t\t}]\n\t\t}\n\t\tif
   196                type == \"emptyDir\" {\n\t\t\tmedium: *\"\" | \"Memory\"\n\t\t}\n\t}]\n\n\t//
   197                +usage=Instructions for assessing whether the container is alive.\n\tlivenessProbe?:
   198                #HealthProbe\n\n\t// +usage=Instructions for assessing whether the container
   199                is in a suitable state to serve traffic.\n\treadinessProbe?: #HealthProbe\n\n\t//
   200                +usage=Specify the hostAliases to add\n\thostAliases?: [...{\n\t\tip:
   201                string\n\t\thostnames: [...string]\n\t}]\n}\n#HealthProbe: {\n\n\t//
   202                +usage=Instructions for assessing container health by executing a command.
   203                Either this attribute or the httpGet attribute or the tcpSocket attribute
   204                MUST be specified. This attribute is mutually exclusive with both the
   205                httpGet attribute and the tcpSocket attribute.\n\texec?: {\n\t\t// +usage=A
   206                command to be executed inside the container to assess its health. Each
   207                space delimited token of the command is a separate array element. Commands
   208                exiting 0 are considered to be successful probes, whilst all other exit
   209                codes are considered failures.\n\t\tcommand: [...string]\n\t}\n\n\t//
   210                +usage=Instructions for assessing container health by executing an HTTP
   211                GET request. Either this attribute or the exec attribute or the tcpSocket
   212                attribute MUST be specified. This attribute is mutually exclusive with
   213                both the exec attribute and the tcpSocket attribute.\n\thttpGet?: {\n\t\t//
   214                +usage=The endpoint, relative to the port, to which the HTTP GET request
   215                should be directed.\n\t\tpath: string\n\t\t// +usage=The TCP socket
   216                within the container to which the HTTP GET request should be directed.\n\t\tport:
   217                int\n\t\thttpHeaders?: [...{\n\t\t\tname:  string\n\t\t\tvalue: string\n\t\t}]\n\t}\n\n\t//
   218                +usage=Instructions for assessing container health by probing a TCP
   219                socket. Either this attribute or the exec attribute or the httpGet attribute
   220                MUST be specified. This attribute is mutually exclusive with both the
   221                exec attribute and the httpGet attribute.\n\ttcpSocket?: {\n\t\t// +usage=The
   222                TCP socket within the container that should be probed to assess container
   223                health.\n\t\tport: int\n\t}\n\n\t// +usage=Number of seconds after the
   224                container is started before the first probe is initiated.\n\tinitialDelaySeconds:
   225                *0 | int\n\n\t// +usage=How often, in seconds, to execute the probe.\n\tperiodSeconds:
   226                *10 | int\n\n\t// +usage=Number of seconds after which the probe times
   227                out.\n\ttimeoutSeconds: *1 | int\n\n\t// +usage=Minimum consecutive
   228                successes for the probe to be considered successful after having failed.\n\tsuccessThreshold:
   229                *1 | int\n\n\t// +usage=Number of consecutive failures required to determine
   230                the container is not alive (liveness probe) or not ready (readiness
   231                probe).\n\tfailureThreshold: *3 | int\n}\n"
   232          status:
   233            customStatus: "ready: {\n\treadyReplicas: *0 | int\n} & {\n\tif context.output.status.readyReplicas
   234              != _|_ {\n\t\treadyReplicas: context.output.status.readyReplicas\n\t}\n}\nmessage:
   235              \"Ready:\\(ready.readyReplicas)/\\(context.output.spec.replicas)\""
   236            healthPolicy: "ready: {\n\tupdatedReplicas:    *0 | int\n\treadyReplicas:
   237              \     *0 | int\n\treplicas:           *0 | int\n\tobservedGeneration:
   238              *0 | int\n} & {\n\tif context.output.status.updatedReplicas != _|_ {\n\t\tupdatedReplicas:
   239              context.output.status.updatedReplicas\n\t}\n\tif context.output.status.readyReplicas
   240              != _|_ {\n\t\treadyReplicas: context.output.status.readyReplicas\n\t}\n\tif
   241              context.output.status.replicas != _|_ {\n\t\treplicas: context.output.status.replicas\n\t}\n\tif
   242              context.output.status.observedGeneration != _|_ {\n\t\tobservedGeneration:
   243              context.output.status.observedGeneration\n\t}\n}\nisHealth: (context.output.spec.replicas
   244              == ready.readyReplicas) && (context.output.spec.replicas == ready.updatedReplicas)
   245              && (context.output.spec.replicas == ready.replicas) && (ready.observedGeneration
   246              == context.output.metadata.generation || ready.observedGeneration > context.output.metadata.generation)"
   247          workload:
   248            definition:
   249              apiVersion: apps/v1
   250              kind: Deployment
   251            type: deployments.apps
   252        status: {}
   253    traitDefinitions:
   254      scaler:
   255        apiVersion: core.oam.dev/v1beta1
   256        kind: TraitDefinition
   257        metadata:
   258          annotations:
   259            definition.oam.dev/description: Manually scale K8s pod for your workload
   260              which follows the pod spec in path 'spec.template'.
   261            meta.helm.sh/release-name: kubevela
   262            meta.helm.sh/release-namespace: vela-system
   263          labels:
   264            app.kubernetes.io/managed-by: Helm
   265          name: scaler
   266          namespace: vela-system
   267        spec:
   268          appliesToWorkloads:
   269            - '*'
   270          definitionRef:
   271            name: ""
   272          schematic:
   273            cue:
   274              template: "parameter: {\n\t// +usage=Specify the number of workload\n\treplicas:
   275                *1 | int\n}\n// +patchStrategy=retainKeys\npatch: spec: replicas: parameter.replicas\n"
   276        status: {}
   277  status: {}
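
For orientation, here is a rough sketch of what this fixture resolves to at deploy time: the `webservice` component definition's CUE template emits an `apps/v1` Deployment (plus a Service only when a port sets `expose: true`, which this component does not), and the `scaler` trait patches `spec.replicas`. Rendering the single component above (`image: nginx`, scaler `replicas: 1`) would therefore produce approximately the manifest below; the workload name, namespace, and dispatch-time labels are assumptions about KubeVela's defaults, and field order and omitted defaults are illustrative rather than verbatim controller output.

    # Approximate render of component "backport-1-2-test-demo" (illustrative sketch, not captured output)
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: backport-1-2-test-demo   # assumed: the workload is typically named after the component
      namespace: default
    spec:
      replicas: 1                    # patched in by the scaler trait (+patchStrategy=retainKeys)
      selector:
        matchLabels:
          app.oam.dev/component: backport-1-2-test-demo
      template:
        metadata:
          labels:
            app.oam.dev/component: backport-1-2-test-demo
        spec:
          containers:
            - name: backport-1-2-test-demo
              image: nginx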