github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/controllers/core/dockercomposeservice/reconciler.go

package dockercomposeservice

import (
	"context"
	"strings"
	"sync"

	dtypes "github.com/docker/docker/api/types"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/docker/go-connections/nat"

	"github.com/tilt-dev/tilt/internal/controllers/apicmp"
	"github.com/tilt-dev/tilt/internal/controllers/apis/configmap"
	"github.com/tilt-dev/tilt/internal/controllers/apis/imagemap"
	"github.com/tilt-dev/tilt/internal/controllers/indexer"
	"github.com/tilt-dev/tilt/internal/docker"
	"github.com/tilt-dev/tilt/internal/dockercompose"
	"github.com/tilt-dev/tilt/internal/filteredwriter"
	"github.com/tilt-dev/tilt/internal/store"
	"github.com/tilt-dev/tilt/internal/store/dockercomposeservices"
	"github.com/tilt-dev/tilt/pkg/apis"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/logger"
	"github.com/tilt-dev/tilt/pkg/model"
)

type Reconciler struct {
	dcc          dockercompose.DockerComposeClient
	dc           docker.Client
	st           store.RStore
	ctrlClient   ctrlclient.Client
	indexer      *indexer.Indexer
	requeuer     *indexer.Requeuer
	disableQueue *DisableSubscriber
	mu           sync.Mutex

	// Protected by the mutex.
	results              map[types.NamespacedName]*Result
	resultsByServiceName map[string]*Result
	projectWatches       map[string]*ProjectWatch
}

func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
	b := ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.DockerComposeService{}).
		WatchesRawSource(r.requeuer).
		Watches(&v1alpha1.ImageMap{},
			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
		Watches(&v1alpha1.ConfigMap{},
			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue))

	return b, nil
}

func NewReconciler(
	ctrlClient ctrlclient.Client,
	dcc dockercompose.DockerComposeClient,
	dc docker.Client,
	st store.RStore,
	scheme *runtime.Scheme,
	disableQueue *DisableSubscriber,
) *Reconciler {
	return &Reconciler{
		ctrlClient:           ctrlClient,
		dcc:                  dcc,
		dc:                   dc.ForOrchestrator(model.OrchestratorDC),
		indexer:              indexer.NewIndexer(scheme, indexDockerComposeService),
		st:                   st,
		requeuer:             indexer.NewRequeuer(),
		disableQueue:         disableQueue,
		results:              make(map[types.NamespacedName]*Result),
		resultsByServiceName: make(map[string]*Result),
		projectWatches:       make(map[string]*ProjectWatch),
	}
}
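// Example (illustrative sketch, not part of the original file): wiring this
// reconciler into a controller-runtime manager. The mgr, dcc, dockerClient,
// st, scheme, and disableQueue variables are assumed to be supplied by the
// caller's setup code:
//
//	r := NewReconciler(mgr.GetClient(), dcc, dockerClient, st, scheme, disableQueue)
//	b, err := r.CreateBuilder(mgr)
//	if err != nil {
//		return err
//	}
//	if err := b.Complete(r); err != nil {
//		return err
//	}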
// Redeploy the docker compose service when its spec
// changes or any of its dependencies change.
func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
	nn := request.NamespacedName

	var obj v1alpha1.DockerComposeService
	err := r.ctrlClient.Get(ctx, nn, &obj)
	r.indexer.OnReconcile(nn, &obj)
	if err != nil && !apierrors.IsNotFound(err) {
		return ctrl.Result{}, err
	}

	if apierrors.IsNotFound(err) || !obj.ObjectMeta.DeletionTimestamp.IsZero() {
		rs, ok := r.updateForDisableQueue(nn, true /* deleting */)
		if ok {
			r.disableQueue.UpdateQueue(rs)
		}
		r.clearResult(nn)

		r.st.Dispatch(dockercomposeservices.NewDockerComposeServiceDeleteAction(nn.Name))
		r.manageOwnedProjectWatches(ctx)
		return r.manageOwnedLogStream(ctx, nn, nil)
	}

	r.st.Dispatch(dockercomposeservices.NewDockerComposeServiceUpsertAction(&obj))

	// Get the ConfigMap-backed disable status.
	ctx = store.MustObjectLogHandler(ctx, r.st, &obj)
	disableStatus, err := configmap.MaybeNewDisableStatus(ctx, r.ctrlClient, obj.Spec.DisableSource, obj.Status.DisableStatus)
	if err != nil {
		return ctrl.Result{}, err
	}

	r.recordSpecAndDisableStatus(nn, obj.Spec, *disableStatus)

	rs, ok := r.updateForDisableQueue(nn, disableStatus.State == v1alpha1.DisableStateDisabled)
	if ok {
		r.disableQueue.UpdateQueue(rs)
		if disableStatus.State == v1alpha1.DisableStateDisabled {
			r.recordRmOnDisable(nn)
		}
	}

	if disableStatus.State != v1alpha1.DisableStateDisabled {
		// Fetch all the images needed to apply this YAML.
		imageMaps, err := imagemap.NamesToObjects(ctx, r.ctrlClient, obj.Spec.ImageMaps)
		if err != nil {
			return ctrl.Result{}, err
		}

		// Apply to the cluster if necessary.
		if r.shouldDeployOnReconcile(request.NamespacedName, &obj, imageMaps) {
			// If we have no image dependencies in Tilt, tell docker compose
			// to handle any necessary image builds.
			dcManagedBuild := len(imageMaps) == 0
			_ = r.forceApplyHelper(ctx, nn, obj.Spec, imageMaps, dcManagedBuild)
		}
	}

	// TODO(nick): Deploy dockercompose services that aren't managed via buildcontrol

	err = r.maybeUpdateStatus(ctx, nn, &obj)
	if err != nil {
		return ctrl.Result{}, err
	}
	r.manageOwnedProjectWatches(ctx)
	return r.manageOwnedLogStream(ctx, nn, &obj)
}
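// Note on the requeuer: Reconcile runs not only on apiserver events but also
// whenever another component adds a key to r.requeuer, which CreateBuilder
// registered above as a raw watch source. A minimal sketch of that nudge
// (the service name is hypothetical):
//
//	nn := types.NamespacedName{Name: "frontend"}
//	r.requeuer.Add(nn)
//	// ...controller-runtime then calls r.Reconcile with that NamespacedName.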
// Determine if we should deploy the current YAML.
//
// Ensures:
//  1. We have enough info to deploy, and
//  2. Either we haven't deployed before,
//     or one of the inputs has changed since the last deploy.
func (r *Reconciler) shouldDeployOnReconcile(
	nn types.NamespacedName,
	obj *v1alpha1.DockerComposeService,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
) bool {
	if obj.Annotations[v1alpha1.AnnotationManagedBy] != "" {
		// Until resource dependencies are expressed in the API,
		// we can't use reconciliation to deploy DockerComposeService objects
		// managed by the buildcontrol engine.
		return false
	}

	for _, imageMapName := range obj.Spec.ImageMaps {
		_, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
		if !ok {
			// We haven't built the images we need to deploy yet.
			return false
		}
	}

	r.mu.Lock()
	result, ok := r.results[nn]
	r.mu.Unlock()

	if !ok || result.Status.LastApplyStartTime.IsZero() {
		// We've never successfully deployed before, so deploy now.
		return true
	}

	if !apicmp.DeepEqual(obj.Spec, result.Spec) {
		// The YAML to deploy changed.
		return true
	}

	imageMapNames := obj.Spec.ImageMaps
	if len(imageMapNames) != len(result.ImageMapSpecs) ||
		len(imageMapNames) != len(result.ImageMapStatuses) {
		return true
	}

	for i, name := range obj.Spec.ImageMaps {
		im := imageMaps[types.NamespacedName{Name: name}]
		if !apicmp.DeepEqual(im.Spec, result.ImageMapSpecs[i]) {
			return true
		}
		if !apicmp.DeepEqual(im.Status, result.ImageMapStatuses[i]) {
			return true
		}
	}

	return false
}

// We need to update the disable queue in two cases:
//  1. If the resource is enabled (to clear any pending deletes), or
//  2. If the resource is deleted but still running (to kick off a delete).
func (r *Reconciler) updateForDisableQueue(nn types.NamespacedName, isDisabled bool) (resourceState, bool) {
	r.mu.Lock()
	defer r.mu.Unlock()

	result, isExisting := r.results[nn]
	if !isExisting {
		return resourceState{}, false
	}

	if !isDisabled {
		return resourceState{Name: nn.Name, Spec: result.Spec}, true
	}

	// We only need to do cleanup if there's a container available.
	if result.Status.ContainerState != nil {
		return resourceState{
			Name:         nn.Name,
			Spec:         result.Spec,
			NeedsCleanup: true,
			StartTime:    result.Status.ContainerState.StartedAt.Time,
		}, true
	}

	return resourceState{}, false
}

// Records that a delete was performed on disable.
func (r *Reconciler) recordRmOnDisable(nn types.NamespacedName) {
	r.mu.Lock()
	defer r.mu.Unlock()

	result, isExisting := r.results[nn]
	if !isExisting {
		return
	}

	result.Status.ContainerID = ""
	result.Status.ContainerName = ""
	result.Status.ContainerState = nil
	result.Status.PortBindings = nil
}

// Removes all state for an object.
func (r *Reconciler) clearResult(nn types.NamespacedName) {
	r.mu.Lock()
	defer r.mu.Unlock()
	result, ok := r.results[nn]
	if ok {
		delete(r.resultsByServiceName, result.Spec.Service)
		delete(r.results, nn)
	}
}

// Create a result object if necessary. Caller must hold the mutex.
func (r *Reconciler) ensureResultExists(nn types.NamespacedName) *Result {
	existing, hasExisting := r.results[nn]
	if hasExisting {
		return existing
	}

	result := &Result{Name: nn}
	r.results[nn] = result
	return result
}

// Record the spec and disable state of the service.
func (r *Reconciler) recordSpecAndDisableStatus(
	nn types.NamespacedName,
	spec v1alpha1.DockerComposeServiceSpec,
	disableStatus v1alpha1.DisableStatus) {
	r.mu.Lock()
	defer r.mu.Unlock()

	result := r.ensureResultExists(nn)
	if !apicmp.DeepEqual(result.Spec, spec) {
		delete(r.resultsByServiceName, result.Spec.Service)
		result.Spec = spec
		result.ProjectHash = dockercomposeservices.MustHashProject(spec.Project)
		r.resultsByServiceName[result.Spec.Service] = result
	}

	if apicmp.DeepEqual(result.Status.DisableStatus, &disableStatus) {
		return
	}

	update := result.Status.DeepCopy()
	update.DisableStatus = &disableStatus
	result.Status = *update
}
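// Taken together, the helpers above give Reconcile a small state machine for
// disables. An illustrative sketch of the flow, simplified from Reconcile:
//
//	disabled := disableStatus.State == v1alpha1.DisableStateDisabled
//	if rs, ok := r.updateForDisableQueue(nn, disabled); ok {
//		r.disableQueue.UpdateQueue(rs) // enabled: clears pending deletes
//		if disabled {
//			r.recordRmOnDisable(nn) // disabled: forget the removed container
//		}
//	}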
// A helper that deletes the Docker Compose service, even if it hasn't been applied yet.
//
// Primarily intended so that the build controller can do force restarts.
func (r *Reconciler) ForceDelete(
	ctx context.Context,
	nn types.NamespacedName,
	spec v1alpha1.DockerComposeServiceSpec,
	reason string) error {
	out := logger.Get(ctx).Writer(logger.InfoLvl)
	out = filteredwriter.New(out, func(s string) bool {
		// https://app.shortcut.com/windmill/story/13147/docker-compose-down-messages-for-disabled-resources-may-be-confusing
		return strings.HasPrefix(s, "Going to remove")
	})
	err := r.dcc.Rm(ctx, []v1alpha1.DockerComposeServiceSpec{spec}, out, out)
	if err != nil {
		logger.Get(ctx).Errorf("Error %s: %v", reason, err)
	}
	r.clearResult(nn)
	r.requeuer.Add(nn)
	return nil
}

// Apply the DockerCompose service spec, unconditionally,
// and requeue the reconciler so that it updates the apiserver.
//
// We expose this as a public method as a hack! Currently, in Tilt, BuildController
// handles dependencies between resources. The API server doesn't know about build
// dependencies yet. So Tiltfile-owned resources are applied manually, rather than
// going through the normal reconcile system.
func (r *Reconciler) ForceApply(
	ctx context.Context,
	nn types.NamespacedName,
	spec v1alpha1.DockerComposeServiceSpec,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
	dcManagedBuild bool) v1alpha1.DockerComposeServiceStatus {
	status := r.forceApplyHelper(ctx, nn, spec, imageMaps, dcManagedBuild)
	r.requeuer.Add(nn)
	return status
}

// Records status when an apply fails.
// This might mean the image build failed, if we're using dc-managed image builds.
// Does not necessarily clear the current running container.
func (r *Reconciler) recordApplyError(
	nn types.NamespacedName,
	spec v1alpha1.DockerComposeServiceSpec,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
	err error,
	startTime metav1.MicroTime,
) v1alpha1.DockerComposeServiceStatus {
	r.mu.Lock()
	defer r.mu.Unlock()

	result := r.ensureResultExists(nn)
	status := result.Status.DeepCopy()
	status.LastApplyStartTime = startTime
	status.LastApplyFinishTime = apis.NowMicro()
	status.ApplyError = err.Error()
	result.Status = *status
	result.SetImageMapInputs(spec, imageMaps)
	return *status
}

// Records status when an apply succeeds.
func (r *Reconciler) recordApplyStatus(
	nn types.NamespacedName,
	spec v1alpha1.DockerComposeServiceSpec,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
	newStatus v1alpha1.DockerComposeServiceStatus,
) v1alpha1.DockerComposeServiceStatus {
	r.mu.Lock()
	defer r.mu.Unlock()

	result := r.ensureResultExists(nn)
	disableStatus := result.Status.DisableStatus
	newStatus.DisableStatus = disableStatus
	result.Status = newStatus
	result.SetImageMapInputs(spec, imageMaps)

	return newStatus
}
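// Example (illustrative sketch, an assumption about caller shape): how a
// build-controller-style caller might drive a force restart through the
// public entry points above; spec and imageMaps are assumed to come from
// the build pipeline:
//
//	if err := r.ForceDelete(ctx, nn, spec, "force restart"); err != nil {
//		return err
//	}
//	status := r.ForceApply(ctx, nn, spec, imageMaps, len(imageMaps) == 0)
//	if status.ApplyError != "" {
//		// The apply (or dc-managed image build) failed; see status.ApplyError.
//	}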
// A helper that applies the given spec to the cluster,
// tracking the state of the deploy in the results map.
func (r *Reconciler) forceApplyHelper(
	ctx context.Context,
	nn types.NamespacedName,
	spec v1alpha1.DockerComposeServiceSpec,
	imageMaps map[types.NamespacedName]*v1alpha1.ImageMap,
	// TODO(nick): Figure out a better way to infer the dcManagedBuild setting.
	dcManagedBuild bool,
) v1alpha1.DockerComposeServiceStatus {
	startTime := apis.NowMicro()
	stdout := logger.Get(ctx).Writer(logger.InfoLvl)
	stderr := logger.Get(ctx).Writer(logger.InfoLvl)
	err := r.dcc.Up(ctx, spec, dcManagedBuild, stdout, stderr)
	if err != nil {
		return r.recordApplyError(nn, spec, imageMaps, err, startTime)
	}

	// Grab the initial container state.
	cid, err := r.dcc.ContainerID(ctx, spec)
	if err != nil {
		return r.recordApplyError(nn, spec, imageMaps, err, startTime)
	}

	containerJSON, err := r.dc.ContainerInspect(ctx, string(cid))
	if err != nil {
		logger.Get(ctx).Debugf("Error inspecting container %s: %v", cid, err)
	}

	name := ""
	var containerState *dtypes.ContainerState
	if containerJSON.ContainerJSONBase != nil && containerJSON.ContainerJSONBase.State != nil {
		containerState = containerJSON.ContainerJSONBase.State

		// NOTE(nick): For some reason, docker container names start with "/"
		// but are printed to the user without it.
		name = strings.TrimPrefix(containerJSON.ContainerJSONBase.Name, "/")
	}

	var ports nat.PortMap
	if containerJSON.NetworkSettings != nil {
		ports = containerJSON.NetworkSettings.NetworkSettingsBase.Ports
	}

	status := dockercompose.ToServiceStatus(cid, name, containerState, ports)
	status.LastApplyStartTime = startTime
	status.LastApplyFinishTime = apis.NowMicro()
	return r.recordApplyStatus(nn, spec, imageMaps, status)
}

// Update the status on the apiserver if necessary.
func (r *Reconciler) maybeUpdateStatus(ctx context.Context, nn types.NamespacedName, obj *v1alpha1.DockerComposeService) error {
	// Snapshot the tracked status under the mutex, consistent with the
	// "Protected by the mutex" invariant on the results map.
	r.mu.Lock()
	newStatus := v1alpha1.DockerComposeServiceStatus{}
	existing, ok := r.results[nn]
	if ok {
		newStatus = existing.Status
	}
	r.mu.Unlock()

	if apicmp.DeepEqual(obj.Status, newStatus) {
		return nil
	}

	oldError := obj.Status.ApplyError
	newError := newStatus.ApplyError
	update := obj.DeepCopy()
	update.Status = *(newStatus.DeepCopy())

	err := r.ctrlClient.Status().Update(ctx, update)
	if err != nil {
		return err
	}

	// Print new errors on objects that aren't managed by the buildcontroller.
	if newError != "" && oldError != newError && update.Annotations[v1alpha1.AnnotationManagedBy] == "" {
		logger.Get(ctx).Errorf("dockercomposeservice %s: %s", obj.Name, newError)
	}
	return nil
}

var imGVK = v1alpha1.SchemeGroupVersion.WithKind("ImageMap")

// indexDockerComposeService returns keys for all the objects we need to watch based on the spec.
func indexDockerComposeService(obj client.Object) []indexer.Key {
	dcs := obj.(*v1alpha1.DockerComposeService)
	result := []indexer.Key{}
	for _, name := range dcs.Spec.ImageMaps {
		result = append(result, indexer.Key{
			Name: types.NamespacedName{Name: name},
			GVK:  imGVK,
		})
	}

	if dcs.Spec.DisableSource != nil {
		cm := dcs.Spec.DisableSource.ConfigMap
		if cm != nil {
			cmGVK := v1alpha1.SchemeGroupVersion.WithKind("ConfigMap")
			result = append(result, indexer.Key{
				Name: types.NamespacedName{Name: cm.Name},
				GVK:  cmGVK,
			})
		}
	}

	return result
}
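// Example (hypothetical object; the DisableSource field names are an
// assumption about the v1alpha1 API): a service with one image map and a
// ConfigMap disable source yields two watch keys:
//
//	dcs := &v1alpha1.DockerComposeService{
//		ObjectMeta: metav1.ObjectMeta{Name: "frontend"},
//		Spec: v1alpha1.DockerComposeServiceSpec{
//			ImageMaps: []string{"frontend-image"},
//			DisableSource: &v1alpha1.DisableSource{
//				ConfigMap: &v1alpha1.ConfigMapDisableSource{Name: "frontend-disable", Key: "isDisabled"},
//			},
//		},
//	}
//	keys := indexDockerComposeService(dcs)
//	// keys[0] -> {Name: "frontend-image", GVK: ImageMap}
//	// keys[1] -> {Name: "frontend-disable", GVK: ConfigMap}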
// Keeps track of the state we currently know about.
type Result struct {
	Name             types.NamespacedName
	Spec             v1alpha1.DockerComposeServiceSpec
	ImageMapSpecs    []v1alpha1.ImageMapSpec
	ImageMapStatuses []v1alpha1.ImageMapStatus
	ProjectHash      string

	Status v1alpha1.DockerComposeServiceStatus
}

func (r *Result) SetImageMapInputs(spec v1alpha1.DockerComposeServiceSpec, imageMaps map[types.NamespacedName]*v1alpha1.ImageMap) {
	r.ImageMapSpecs = nil
	r.ImageMapStatuses = nil
	for _, imageMapName := range spec.ImageMaps {
		im, ok := imageMaps[types.NamespacedName{Name: imageMapName}]
		if !ok {
			// This should never happen, but if it does, just continue quietly.
			continue
		}

		r.ImageMapSpecs = append(r.ImageMapSpecs, im.Spec)
		r.ImageMapStatuses = append(r.ImageMapStatuses, im.Status)
	}
}

// Keeps track of the projects we're currently watching.
type ProjectWatch struct {
	ctx     context.Context
	cancel  func()
	project v1alpha1.DockerComposeProject
	hash    string
}
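// Illustrative sketch (an assumption; manageOwnedProjectWatches lives outside
// this file) of the lifecycle ProjectWatch implies: start one watch per
// distinct project hash, and cancel it once no result references that hash:
//
//	ctx, cancel := context.WithCancel(parentCtx)
//	r.projectWatches[hash] = &ProjectWatch{
//		ctx:     ctx,
//		cancel:  cancel,
//		project: project,
//		hash:    hash,
//	}
//	// ...later, when the hash is orphaned:
//	pw := r.projectWatches[hash]
//	pw.cancel()
//	delete(r.projectWatches, hash)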