github.com/kubeshop/testkube@v1.17.23/contrib/executor/jmeterd/pkg/slaves/client.go

package slaves

import (
	"bytes"
	"context"
	"encoding/json"
	"html/template"
	"strings"
	"time"

	"github.com/kubeshop/testkube/pkg/ui"

	batchv1 "k8s.io/api/batch/v1"

	"github.com/pkg/errors"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/kubernetes"
	kyaml "sigs.k8s.io/kustomize/kyaml/yaml"
	"sigs.k8s.io/kustomize/kyaml/yaml/merge2"

	"github.com/kubeshop/testkube/pkg/api/v1/testkube"
	"github.com/kubeshop/testkube/pkg/executor"
	"github.com/kubeshop/testkube/pkg/executor/output"
	"github.com/kubeshop/testkube/pkg/utils"
)

const (
	podsTimeout = 5 * time.Minute
)

// Client manages the lifecycle of JMeter slave pods for a distributed test run.
type Client struct {
	clientSet     kubernetes.Interface
	slavesConfigs executor.SlavesConfigs
	namespace     string
	execution     testkube.Execution
	envs          map[string]string
	envVariables  map[string]testkube.Variable
}

// PodOptions carries the values rendered into the slave pod template.
type PodOptions struct {
	Name                  string
	Namespace             string
	JobName               string
	JobUID                string
	ActiveDeadlineSeconds int
	Registry              string
	InitImage             string
	Image                 string
	Jsn                   string
	CertificateSecret     string
	ServiceAccountName    string
	EnvConfigMaps         []testkube.EnvReference
	EnvSecrets            []testkube.EnvReference
	Ports                 []v1.ContainerPort
	Resources             *testkube.PodResourcesRequest
	ImagePullSecrets      []string
	ArtifactRequest       *testkube.ArtifactRequest
	Features              testkube.Features
	NatsUri               string
	LogSidecarImage       string
	RunnerCustomCASecret  string
}

// NewClient creates a new slave client.
func NewClient(
	clientSet kubernetes.Interface,
	execution testkube.Execution,
	slavesConfigs executor.SlavesConfigs,
	envs map[string]string,
	slavesEnvVariables map[string]testkube.Variable,
) *Client {
	return &Client{
		clientSet:     clientSet,
		slavesConfigs: slavesConfigs,
		namespace:     execution.TestNamespace,
		execution:     execution,
		envs:          envs,
		envVariables:  slavesEnvVariables,
	}
}

// CreateSlaves creates the requested number of slave pods and returns a map of
// pod name to pod IP address once every pod is ready.
func (c *Client) CreateSlaves(ctx context.Context, count int) (SlaveMeta, error) {
	output.PrintLogf("%s Creating slave pods: %d", ui.IconRocket, count)
	podIPAddressChan := make(chan map[string]string, count)
	errorChan := make(chan error, count)
	podIPAddresses := make(map[string]string)

	for i := 1; i <= count; i++ {
		go c.createSlavePod(ctx, i, podIPAddressChan, errorChan)
	}

	for i := 0; i < count; i++ {
		select {
		case ipAddress := <-podIPAddressChan:
			for podName, podIp := range ipAddress {
				podIPAddresses[podName] = podIp
			}
		case err := <-errorChan:
			if err != nil {
				return nil, errors.Wrap(err, "error while creating and resolving slave pod IP addresses")
			}
		}
	}

	output.PrintLog("Successfully resolved slave pods IP addresses")

	slaveMeta := SlaveMeta(podIPAddresses)
	return slaveMeta, nil
}

// createSlavePod creates a slave pod and sends its IP address on the podIPAddressChan
// channel when the pod is in the ready state.
func (c *Client) createSlavePod(ctx context.Context, slavesPodNumber int, podIPAddressChan chan<- map[string]string, errorChan chan<- error) {
	slavePod, err := c.getSlavePodConfiguration(ctx, slavesPodNumber)
	if err != nil {
		errorChan <- err
		return
	}

	p, err := c.clientSet.CoreV1().Pods(c.namespace).Create(ctx, slavePod, metav1.CreateOptions{})
	if err != nil {
		errorChan <- err
		return
	}

	// Wait for the pod to become ready
	conditionFunc := isPodReady(c.clientSet, p.Name, c.namespace)

	if err = wait.PollUntilContextTimeout(ctx, time.Second, podsTimeout, true, conditionFunc); err != nil {
		errorChan <- err
		return
	}

	// Re-fetch the pod so the assigned IP address is populated in its status
	p, err = c.clientSet.CoreV1().Pods(c.namespace).Get(ctx, p.Name, metav1.GetOptions{})
	if err != nil {
		errorChan <- err
		return
	}
	podNameIPMap := map[string]string{
		p.Name: p.Status.PodIP,
	}
	podIPAddressChan <- podNameIPMap
}

func (c *Client) getSlavePodConfiguration(ctx context.Context, slavePodNumber int) (*v1.Pod, error) {
	runnerExecutionStr, err := json.Marshal(c.execution)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling runner execution")
	}

	podName := validateAndGetSlavePodName(c.execution.Name, c.execution.Id, slavePodNumber)

	executorJob, err := c.clientSet.BatchV1().Jobs(c.namespace).Get(ctx, c.execution.Id, metav1.GetOptions{})
	if err != nil {
		output.PrintLogf("%s Failed to fetch Test Job info: %v", ui.IconWarning, err.Error())
	}

	return c.createSlavePodObject(runnerExecutionStr, podName, executorJob, slavePodNumber)
}

func (c *Client) createSlavePodObject(runnerExecutionStr []byte, podName string, executorJob *batchv1.Job, slavePodNumber int) (*v1.Pod, error) {
	tmpl, err := utils.
		NewTemplate("pod").
		Funcs(template.FuncMap{"vartypeptrtostring": testkube.VariableTypeString}).
		Parse(c.slavesConfigs.SlavePodTemplate)
	if err != nil {
		return nil, errors.Errorf("error creating pod spec from SlavePodTemplate: %v", err)
	}

	// TODO: Figure out a better approach which also works for localdev
	if executorJob == nil {
		executorJob = &batchv1.Job{}
	}
	podOptions := c.newPodOptions(runnerExecutionStr, podName, *executorJob)
	var buffer bytes.Buffer
	// Double single quotes so the execution JSON survives being embedded in the template
	podOptions.Jsn = strings.ReplaceAll(podOptions.Jsn, "'", "''")
	if err = tmpl.ExecuteTemplate(&buffer, "pod", podOptions); err != nil {
		return nil, errors.Errorf("executing pod spec template: %v", err)
	}

	var pod v1.Pod
	podSpec := buffer.String()
	if c.execution.SlavePodRequest != nil && c.execution.SlavePodRequest.PodTemplate != "" {
		tmplExt, err := utils.NewTemplate("podExt").Funcs(template.FuncMap{"vartypeptrtostring": testkube.VariableTypeString}).
			Parse(c.execution.SlavePodRequest.PodTemplate)
		if err != nil {
			return nil, errors.Errorf("creating pod extensions spec from template error: %v", err)
		}

		var bufferExt bytes.Buffer
		if err = tmplExt.ExecuteTemplate(&bufferExt, "podExt", podOptions); err != nil {
			return nil, errors.Errorf("executing pod extensions spec template: %v", err)
		}

		// Merge the user-provided pod template extension over the base pod spec
		if podSpec, err = merge2.MergeStrings(bufferExt.String(), podSpec, false, kyaml.MergeOptions{}); err != nil {
			return nil, errors.Errorf("merging pod spec templates: %v", err)
		}
	}

	decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewBufferString(podSpec), len(podSpec))
	if err := decoder.Decode(&pod); err != nil {
		return nil, errors.Errorf("decoding pod spec error: %v", err)
	}

	labels := map[string]string{
		// The execution ID is the only unique field across multiple runs of the same test,
		// so it is the only label that ties the slave pods to the actual jmeterd executor job.
		"testkube.io/managed-by": c.execution.Id,
		"testkube.io/test-name":  c.execution.TestName,
	}
	if pod.Labels == nil {
		pod.Labels = make(map[string]string)
	}
	for key, value := range labels {
		pod.Labels[key] = value
	}

	for i := range pod.Spec.InitContainers {
		pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env, getSlaveRunnerEnv(c.envs, c.execution)...)
	}

	for i := range pod.Spec.Containers {
		pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, getSlaveConfigurationEnv(c.envVariables, slavePodNumber)...)
	}

	return &pod, nil
}

// DeleteSlaves removes the slave pods created for the test run.
func (c *Client) DeleteSlaves(ctx context.Context, meta SlaveMeta) error {
	for _, name := range meta.Names() {
		output.PrintLogf("%s Cleaning up slave pods after test run: %v", ui.IconSuggestion, name)
		err := c.clientSet.CoreV1().Pods(c.namespace).Delete(ctx, name, metav1.DeleteOptions{})
		if err != nil {
			output.PrintLogf("%s Failed to cleanup slave pods: %v", ui.IconCross, err.Error())
			return err
		}
	}
	return nil
}

func (c *Client) newPodOptions(runnerExecutionStr []byte, podName string, executorJob batchv1.Job) *PodOptions {
	var resources *testkube.PodResourcesRequest
	if c.execution.SlavePodRequest != nil {
		resources = c.execution.SlavePodRequest.Resources
	}

	var artifactRequest *testkube.ArtifactRequest
	if c.execution.ArtifactRequest != nil && c.execution.ArtifactRequest.SharedBetweenPods {
		artifactRequest = c.execution.ArtifactRequest
	}

	return &PodOptions{
		Name:                  podName,
		Namespace:             c.namespace,
		JobName:               executorJob.Name,
		JobUID:                string(executorJob.UID),
		ActiveDeadlineSeconds: c.slavesConfigs.ActiveDeadlineSeconds,
		Registry:              c.slavesConfigs.Images.Registry,
		InitImage:             c.slavesConfigs.Images.Init,
		Image:                 c.slavesConfigs.Images.Slave,
		Jsn:                   string(runnerExecutionStr),
		CertificateSecret:     c.slavesConfigs.CertificateSecret,
		ServiceAccountName:    c.slavesConfigs.ServiceAccountName,
		EnvConfigMaps:         c.slavesConfigs.EnvConfigMaps,
		EnvSecrets:            c.slavesConfigs.EnvSecrets,
		Ports: []v1.ContainerPort{
			{
				ContainerPort: serverPort,
				Name:          "server-port",
			}, {
				ContainerPort: localPort,
				Name:          "local-port",
			},
		},
		Resources:            resources,
		ImagePullSecrets:     c.slavesConfigs.ImagePullSecrets,
		ArtifactRequest:      artifactRequest,
		Features:             c.slavesConfigs.Features,
		NatsUri:              c.slavesConfigs.NatsUri,
		LogSidecarImage:      c.slavesConfigs.LogSidecarImage,
		RunnerCustomCASecret: c.slavesConfigs.RunnerCustomCASecret,
	}
}

var _ Interface = (*Client)(nil)
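
// Illustrative usage sketch (not part of the original file): how an executor
// could drive this client for a distributed run. The helper name runDistributed,
// the slave count, and the nil env maps are assumptions for illustration only;
// the real jmeterd executor wires in its own execution, configs, and variables.
//
//	func runDistributed(ctx context.Context, clientSet kubernetes.Interface,
//		execution testkube.Execution, cfg executor.SlavesConfigs) error {
//		client := NewClient(clientSet, execution, cfg, nil, nil)
//
//		// Create the slave pods and wait until each one reports a ready IP.
//		meta, err := client.CreateSlaves(ctx, 3)
//		if err != nil {
//			return err
//		}
//		// Clean up the slave pods regardless of how the run ends.
//		defer func() { _ = client.DeleteSlaves(ctx, meta) }()
//
//		// ... point the JMeter controller at the slave IPs collected in meta ...
//		return nil
//	}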