github.com/kubevela/workflow@v0.6.0/charts/vela-workflow/templates/definitions/chat-gpt.yaml

# Code generated by KubeVela templates. DO NOT EDIT. Please edit the original cue file.
# Definition source cue file: vela-templates/definitions/internal/chat-gpt.cue
apiVersion: core.oam.dev/v1beta1
kind: WorkflowStepDefinition
metadata:
  annotations:
    custom.definition.oam.dev/category: External Integration
    definition.oam.dev/alias: ""
    definition.oam.dev/description: Send request to chat-gpt
    definition.oam.dev/example-url: https://raw.githubusercontent.com/kubevela/workflow/main/examples/workflow-run/chat-gpt.yaml
  name: chat-gpt
  namespace: {{ include "systemDefinitionNamespace" . }}
spec:
  schematic:
    cue:
      template: |
        import (
        	"vela/op"
        	"encoding/json"
        	"encoding/base64"
        )

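        // Resolve the API token: use the literal value when it is set,
        // otherwise read the referenced Secret and base64-decode the
        // configured key.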
        token: op.#Steps & {
        	if parameter.token.value != _|_ {
        		value: parameter.token.value
        	}
        	if parameter.token.secretRef != _|_ {
        		read: op.#Read & {
        			value: {
        				apiVersion: "v1"
        				kind:       "Secret"
        				metadata: {
        					name:      parameter.token.secretRef.name
        					namespace: context.namespace
        				}
        			}
        		}

        		stringValue: op.#ConvertString & {bt: base64.Decode(null, read.value.data[parameter.token.secretRef.key])}
        		value:       stringValue.str
        	}
        }
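        // Send the prompt to the OpenAI chat completions API, authenticating
        // with the resolved token as a Bearer credential.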
        http: op.#HTTPDo & {
        	method: "POST"
        	url:    "https://api.openai.com/v1/chat/completions"
        	request: {
        		timeout: parameter.timeout
        		body:    json.Marshal({
        			model: parameter.model
        			messages: [{
        				if parameter.prompt.type == "custom" {
        					content: parameter.prompt.content
        				}
        				if parameter.prompt.type == "diagnose" {
        					content: """
        You are a professional kubernetes administrator.
        Carefully read the provided information, being certain to spell out the diagnosis & reasoning, and don't skip any steps.
        Answer in \(parameter.prompt.lang).
        ---
        \(json.Marshal(parameter.prompt.content))
        ---
        What is wrong with this object and how to fix it?
        """
        				}
        				if parameter.prompt.type == "audit" {
        					content: """
        You are a professional kubernetes administrator.
        You inspect the object and find out the security misconfigurations and give advice.
        Write down the possible problems in bullet points, using the imperative tense.
        Remember to write only the most important points and do not write more than a few bullet points.
        Answer in \(parameter.prompt.lang).
        ---
        \(json.Marshal(parameter.prompt.content))
        ---
        What is the security problem with this object and how to fix it?
        """
        				}
        				if parameter.prompt.type == "quality-gate" {
        					content: """
        You are a professional kubernetes administrator.
        You inspect the object and find out the security misconfigurations and rate the object. The max score is 100.
        Answer with score only.
        ---
        \(json.Marshal(parameter.prompt.content))
        ---
        What is the score of this object?
        """
        				}
        				role: "user"
        			}]
        		})
        		header: {
        			"Content-Type": "application/json"
        			Authorization:  "Bearer \(token.value)"
        		}
        	}
        }
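        // Parse the JSON response body and fail the step when the API
        // returns an HTTP error status.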
        response: json.Unmarshal(http.response.body)
        fail:     op.#Steps & {
        	if http.response.statusCode >= 400 {
        		requestFail: op.#Fail & {
        			message: "\(http.response.statusCode): failed to request: \(response.error.message)"
        		}
        	}
        }
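        // Expose the first completion choice as the step result and log it.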
        result: response.choices[0].message.content
        log:    op.#Log & {
        	data: result
        }
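        // Step parameters: the API token (a literal value or a Secret
        // reference), the model, the prompt, and the request timeout.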
        parameter: {
        	token: close({
        		// +usage=the token value
        		value: string
        	}) | close({
        		secretRef: {
        			// +usage=name is the name of the secret
        			name: string
        			// +usage=key is the token key in the secret
        			key: string
        		}
        	})
        	// +usage=the model name
        	model: *"gpt-3.5-turbo" | string
        	// +usage=the prompt to use
        	prompt: {
        		type:    *"custom" | "diagnose" | "audit" | "quality-gate"
        		lang:    *"English" | "Chinese"
        		content: string | {...}
        	}
        	timeout: *"30s" | string
        }
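# Usage sketch (comments only; it does not affect the applied definition): a
# WorkflowRun that invokes this step. The names, namespace, token, and prompt
# below are placeholders; the upstream example is linked in the example-url
# annotation above.
#
#   apiVersion: core.oam.dev/v1alpha1
#   kind: WorkflowRun
#   metadata:
#     name: chat-gpt-example
#     namespace: default
#   spec:
#     workflowSpec:
#       steps:
#         - name: ask-gpt
#           type: chat-gpt
#           properties:
#             token:
#               value: "<openai-api-token>"
#             model: gpt-3.5-turbo
#             prompt:
#               type: custom
#               content: Introduce KubeVela in one sentence.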