// github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/controllers/core/session/conv.go

package session

import (
	"fmt"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/tilt-dev/tilt/internal/engine/buildcontrol"
	"github.com/tilt-dev/tilt/internal/store/k8sconv"

	"github.com/tilt-dev/tilt/internal/store"
	"github.com/tilt-dev/tilt/pkg/apis"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	session "github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/model"
)

func (r *Reconciler) targetsForResource(mt *store.ManifestTarget, holds buildcontrol.HoldSet, ci *v1alpha1.SessionCISpec, result *ctrl.Result) []session.Target {
	var targets []session.Target

	if bt := buildTarget(mt, holds); bt != nil {
		targets = append(targets, *bt)
	}

	if rt := r.runtimeTarget(mt, holds, ci, result); rt != nil {
		targets = append(targets, *rt)
	}

	return targets
}

func (r *Reconciler) k8sRuntimeTarget(mt *store.ManifestTarget, ci *v1alpha1.SessionCISpec, result *ctrl.Result) *session.Target {
	krs := mt.State.K8sRuntimeState()
	if mt.Manifest.PodReadinessMode() == model.PodReadinessIgnore && krs.HasEverDeployedSuccessfully && krs.PodLen() == 0 {
		// HACK: engine assumes anything with an image will create a pod; PodReadinessIgnore is used in these
		// instances to avoid getting stuck in pending forever; in reality, there's no "runtime" target being
		// monitored by Tilt, so instead of faking it, just omit it (note: only applies AFTER first deploy so
		// that we can determine there are no pods, so it will appear in waiting until then, which is actually
		// desirable and matches behavior in K8sRuntimeState::RuntimeStatus())
		// see https://github.com/tilt-dev/tilt/issues/3619
		return nil
	}

	target := &session.Target{
		Name:      fmt.Sprintf("%s:runtime", mt.Manifest.Name.String()),
		Type:      k8sTargetType(mt),
		Resources: []string{mt.Manifest.Name.String()},
	}

	if mt.State.DisableState == session.DisableStateDisabled {
		target.State.Disabled = &session.TargetStateDisabled{}
		return target
	}

	status := mt.RuntimeStatus()
	pod := krs.MostRecentPod()
	phase := v1.PodPhase(pod.Phase)

	// A Target's StartTime / FinishTime is meant to be a total representation
	// of when the YAML started deploying until when it became ready. We
	// also want it to persist across pod restarts, so we can use it
	// to check if the pod is within the grace period.
	//
	// Ideally, we'd use KubernetesApply's LastApplyStartTime, but
	// LastSuccessfulDeployTime is good enough.
	createdAt := apis.NewMicroTime(mt.State.LastSuccessfulDeployTime)
	k8sGracePeriod := time.Duration(0)
	if ci != nil && ci.K8sGracePeriod != nil {
		k8sGracePeriod = ci.K8sGracePeriod.Duration
	}

	graceStatus := v1alpha1.TargetGraceNotApplicable
	if k8sGracePeriod > 0 && !createdAt.Time.IsZero() {
		graceSoFar := r.clock.Since(createdAt.Time)
		if k8sGracePeriod <= graceSoFar {
			graceStatus = v1alpha1.TargetGraceExceeded
		} else {
			graceStatus = v1alpha1.TargetGraceTolerated

			// Use the ctrl.Result to schedule a reconcile.
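			// Requeueing when the remaining portion of the grace window elapses
			// lets the target flip from Tolerated to Exceeded even if no new pod
			// event triggers a reconcile before then; if an earlier requeue was
			// already requested during this pass, keep that one.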
			requeueAfter := k8sGracePeriod - graceSoFar
			if result.RequeueAfter == 0 || result.RequeueAfter > requeueAfter {
				result.RequeueAfter = requeueAfter
			}
		}
	}

	if status == v1alpha1.RuntimeStatusOK {
		if v1.PodSucceeded == phase {
			target.State.Terminated = &session.TargetStateTerminated{
				StartTime: createdAt,
			}
			return target
		}

		target.State.Active = &session.TargetStateActive{
			StartTime: createdAt,
			Ready:     true,
		}
		return target
	}

	if status == v1alpha1.RuntimeStatusError {
		if phase == v1.PodFailed {
			podErr := strings.Join(pod.Errors, "; ")
			if podErr == "" {
				podErr = fmt.Sprintf("Pod %q failed", pod.Name)
			}
			target.State.Terminated = &session.TargetStateTerminated{
				StartTime:   createdAt,
				Error:       podErr,
				GraceStatus: graceStatus,
			}
			return target
		}

		for _, ctr := range store.AllPodContainers(pod) {
			if k8sconv.ContainerStatusToRuntimeState(ctr) == v1alpha1.RuntimeStatusError {
				target.State.Terminated = &session.TargetStateTerminated{
					StartTime: apis.NewMicroTime(pod.CreatedAt.Time),
					Error: fmt.Sprintf("Pod %s in error state due to container %s: %s",
						pod.Name, ctr.Name, pod.Status),
					GraceStatus: graceStatus,
				}
				return target
			}
		}

		target.State.Terminated = &session.TargetStateTerminated{
			StartTime:   createdAt,
			Error:       "unknown error",
			GraceStatus: graceStatus,
		}
		return target
	}

	if status == v1alpha1.RuntimeStatusPending {
		if v1.PodRunning == phase {
			target.State.Active = &session.TargetStateActive{
				StartTime: createdAt,
				Ready:     false,
			}
			return target
		}

		waitReason := pod.Status
		if waitReason == "" {
			if pod.Name == "" {
				waitReason = "waiting-for-pod"
			} else {
				waitReason = "unknown"
			}
		}
		target.State.Waiting = &session.TargetStateWaiting{
			WaitReason: waitReason,
		}
	}

	return target
}
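
// exampleGraceStatus is an illustrative sketch (not called by the reconciler) of the
// grace-window arithmetic in k8sRuntimeTarget above: given the CI grace period and how
// long it has been since the last successful deploy, it returns the grace status plus
// how long to wait before re-checking (zero when no requeue is needed). It assumes
// TargetGraceStatus is the v1alpha1 type behind the TargetGrace* constants used above.
func exampleGraceStatus(gracePeriod, sinceDeploy time.Duration) (v1alpha1.TargetGraceStatus, time.Duration) {
	if gracePeriod <= 0 {
		// No grace period configured, so the grace machinery doesn't apply.
		return v1alpha1.TargetGraceNotApplicable, 0
	}
	if sinceDeploy >= gracePeriod {
		// The window has elapsed; an erroring target is no longer tolerated.
		return v1alpha1.TargetGraceExceeded, 0
	}
	// Still inside the window: tolerate the error and re-check once it closes.
	return v1alpha1.TargetGraceTolerated, gracePeriod - sinceDeploy
}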

func (r *Reconciler) localServeTarget(mt *store.ManifestTarget, holds buildcontrol.HoldSet) *session.Target {
	if mt.Manifest.LocalTarget().ServeCmd.Empty() {
		// there is no serve_cmd, so don't return a runtime target at all
		// (there will still be a build target from the update cmd)
		return nil
	}

	target := &session.Target{
		Name:      fmt.Sprintf("%s:serve", mt.Manifest.Name.String()),
		Resources: []string{mt.Manifest.Name.String()},
		Type:      session.TargetTypeServer,
	}

	if mt.State.DisableState == session.DisableStateDisabled {
		target.State.Disabled = &session.TargetStateDisabled{}
		return target
	}

	lrs := mt.State.LocalRuntimeState()
	if runtimeErr := lrs.RuntimeStatusError(); runtimeErr != nil {
		target.State.Terminated = &session.TargetStateTerminated{
			StartTime:  apis.NewMicroTime(lrs.StartTime),
			FinishTime: apis.NewMicroTime(lrs.FinishTime),
			Error:      errToString(runtimeErr),
		}
	} else if lrs.PID != 0 {
		target.State.Active = &session.TargetStateActive{
			StartTime: apis.NewMicroTime(lrs.StartTime),
			Ready:     lrs.Ready,
		}
	} else if mt.Manifest.TriggerMode.AutoInitial() || mt.State.StartedFirstBuild() {
		// default to waiting unless this resource has auto_init=False and has never
		// had a build triggered for other reasons (e.g. trigger_mode=TRIGGER_MODE_AUTO and
		// a relevant file change, or being manually invoked via UI)
		// the latter case ensures there's no race condition between a build being
		// triggered and the local process actually being launched
		//
		// otherwise, Terminated/Active/Waiting will all be nil, which indicates that
		// the target is currently inactive
		target.State.Waiting = waitingFromHolds(mt.Manifest.Name, holds)
	}

	return target
}

// genericRuntimeTarget creates a target from the RuntimeState interface without any domain-specific considerations.
//
// This is used both for target types that don't require specialized logic (Docker Compose) and as a fallback for
// any new types that don't have deeper support here.
func (r *Reconciler) genericRuntimeTarget(mt *store.ManifestTarget, holds buildcontrol.HoldSet) *session.Target {
	target := &session.Target{
		Name:      fmt.Sprintf("%s:runtime", mt.Manifest.Name.String()),
		Resources: []string{mt.Manifest.Name.String()},
		Type:      session.TargetTypeServer,
	}

	if mt.State.DisableState == session.DisableStateDisabled {
		target.State.Disabled = &session.TargetStateDisabled{}
		return target
	}

	runtimeStatus := mt.RuntimeStatus()
	switch runtimeStatus {
	case v1alpha1.RuntimeStatusPending:
		target.State.Waiting = waitingFromHolds(mt.Manifest.Name, holds)
	case v1alpha1.RuntimeStatusOK:
		target.State.Active = &session.TargetStateActive{
			StartTime: apis.NewMicroTime(mt.State.LastSuccessfulDeployTime),
			// generic resources have no readiness concept so they're just ready by default
			// (this also applies to Docker Compose, since we don't support its health checks)
			Ready: true,
		}
	case v1alpha1.RuntimeStatusError:
		errMsg := errToString(mt.State.RuntimeState.RuntimeStatusError())
		if errMsg == "" {
			errMsg = fmt.Sprintf("Server target %q failed", mt.Manifest.Name)
		}
		target.State.Terminated = &session.TargetStateTerminated{
			Error: errMsg,
		}
	}

	return target
}

func (r *Reconciler) runtimeTarget(mt *store.ManifestTarget, holds buildcontrol.HoldSet, ci *v1alpha1.SessionCISpec, result *ctrl.Result) *session.Target {
	if mt.Manifest.IsK8s() {
		return r.k8sRuntimeTarget(mt, ci, result)
	} else if mt.Manifest.IsLocal() {
		return r.localServeTarget(mt, holds)
	} else {
		return r.genericRuntimeTarget(mt, holds)
	}
}
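
// For illustration (not used by the code above): a typical Kubernetes resource named
// "frontend" is reported to the Session API as two targets, "frontend:update" (the
// aggregated image build + apply, always TargetTypeJob) and "frontend:runtime" (the
// pod; TargetTypeServer, or TargetTypeJob when its pod readiness mode is "succeeded").
// A local_resource with only a serve_cmd contributes just "frontend:serve", and one
// with only an update cmd just "frontend:update".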

// buildTarget creates a "build" (or update) target for the resource.
//
// Currently, the engine aggregates many different targets into a single build record, and that's reflected here.
// Ideally, as the internals change, more granularity will be provided and this might actually return a slice of
// targets rather than a single target. For example, a K8s resource might have an image build step and then a
// deployment (i.e. kubectl apply) step - currently, both of these will be aggregated together, which can make it
// harder to diagnose where something is stuck or slow.
func buildTarget(mt *store.ManifestTarget, holds buildcontrol.HoldSet) *session.Target {
	if mt.Manifest.IsLocal() && mt.Manifest.LocalTarget().UpdateCmdSpec == nil {
		return nil
	}

	res := &session.Target{
		Name:      fmt.Sprintf("%s:update", mt.Manifest.Name.String()),
		Resources: []string{mt.Manifest.Name.String()},
		Type:      session.TargetTypeJob,
	}

	if mt.State.DisableState == session.DisableStateDisabled {
		res.State.Disabled = &session.TargetStateDisabled{}
		return res
	}

	isPending := mt.NextBuildReason() != model.BuildReasonNone
	currentBuild := mt.State.EarliestCurrentBuild()
	if isPending {
		res.State.Waiting = waitingFromHolds(mt.Manifest.Name, holds)
	} else if !currentBuild.Empty() {
		res.State.Active = &session.TargetStateActive{
			StartTime: apis.NewMicroTime(currentBuild.StartTime),
		}
	} else if len(mt.State.BuildHistory) != 0 {
		lastBuild := mt.State.LastBuild()
		res.State.Terminated = &session.TargetStateTerminated{
			StartTime:  apis.NewMicroTime(lastBuild.StartTime),
			FinishTime: apis.NewMicroTime(lastBuild.FinishTime),
			Error:      errToString(lastBuild.Error),
		}
	}

	return res
}

func k8sTargetType(mt *store.ManifestTarget) session.TargetType {
	if !mt.Manifest.IsK8s() {
		return ""
	}

	krs := mt.State.K8sRuntimeState()
	if krs.PodReadinessMode == model.PodReadinessSucceeded {
		return session.TargetTypeJob
	}

	return session.TargetTypeServer
}

func waitingFromHolds(mn model.ManifestName, holds buildcontrol.HoldSet) *session.TargetStateWaiting {
	// in the API, the reason is not _why_ the target "exists", but rather an explanation for why it's not yet
	// active and is in a pending state (e.g. waiting for dependencies)
	waitReason := "unknown"
	if hold, ok := holds[mn]; ok && hold.Reason != store.HoldReasonNone {
		waitReason = string(hold.Reason)
	}
	return &session.TargetStateWaiting{
		WaitReason: waitReason,
	}
}
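
// exampleWaitReason is an illustrative sketch (not called anywhere): when no hold is
// recorded for a resource, waitingFromHolds falls back to an "unknown" wait reason;
// otherwise the hold's reason string surfaces directly as the target's WaitReason.
func exampleWaitReason() string {
	// With an empty HoldSet there is no hold for "frontend", so this yields "unknown".
	return waitingFromHolds("frontend", buildcontrol.HoldSet{}).WaitReason
}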

// tiltfileTarget creates a session.Target object from a Tiltfile ManifestState.
//
// This is slightly different from generic resource handling because there is no
// ManifestTarget in the engine for the Tiltfile (just ManifestState) and config
// file changes are stored top-level on state, but conceptually it does similar
// things.
func tiltfileTarget(name model.ManifestName, ms *store.ManifestState) session.Target {
	target := session.Target{
		Name:      "tiltfile:update",
		Resources: []string{name.String()},
		Type:      session.TargetTypeJob,
	}

	// Tiltfile is special in engine state and doesn't have a target, just state, so
	// this logic is largely duplicated from the generic resource build logic
	if ms.IsBuilding() {
		target.State.Active = &session.TargetStateActive{
			StartTime: apis.NewMicroTime(ms.EarliestCurrentBuild().StartTime),
		}
	} else if hasPendingChanges, _ := ms.HasPendingChanges(); hasPendingChanges {
		target.State.Waiting = &session.TargetStateWaiting{
			WaitReason: "config-changed",
		}
	} else if len(ms.BuildHistory) != 0 {
		lastBuild := ms.LastBuild()
		target.State.Terminated = &session.TargetStateTerminated{
			StartTime:  apis.NewMicroTime(lastBuild.StartTime),
			FinishTime: apis.NewMicroTime(lastBuild.FinishTime),
			Error:      errToString(lastBuild.Error),
		}
	} else {
		// given the current engine behavior, this doesn't actually occur because
		// the first build happens as part of initialization
		target.State.Waiting = &session.TargetStateWaiting{
			WaitReason: "initial-build",
		}
	}

	return target
}
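
// targetStateSummary is an illustrative helper (not used by the reconciler): it reduces
// a Target built above to a one-word summary, reflecting the convention that at most one
// of Disabled/Terminated/Active/Waiting is set and that all-nil means the target is
// currently inactive (e.g. a local_resource with auto_init=False that has never been
// triggered).
func targetStateSummary(t session.Target) string {
	switch {
	case t.State.Disabled != nil:
		return "disabled"
	case t.State.Terminated != nil:
		return "terminated"
	case t.State.Active != nil:
		return "active"
	case t.State.Waiting != nil:
		return "waiting"
	default:
		return "inactive"
	}
}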