// github.com/tilt-dev/tilt@v0.36.0/internal/controllers/core/podlogstream/podlogstreamcontroller_test.go

package podlogstream

import (
	"context"
	"fmt"
	"io"
	"strings"
	"testing"
	"time"

	"github.com/jonboulle/clockwork"
	"github.com/stretchr/testify/require"

	"github.com/tilt-dev/tilt/internal/controllers/apicmp"
	"github.com/tilt-dev/tilt/internal/timecmp"
	"github.com/tilt-dev/tilt/pkg/apis"

	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/tilt-dev/tilt/internal/container"
	"github.com/tilt-dev/tilt/internal/controllers/fake"
	"github.com/tilt-dev/tilt/internal/controllers/indexer"
	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/internal/store"
	"github.com/tilt-dev/tilt/internal/store/k8sconv"
	"github.com/tilt-dev/tilt/internal/testutils/bufsync"
	"github.com/tilt-dev/tilt/pkg/logger"
	"github.com/tilt-dev/tilt/pkg/model"
)

// Default pod/container identifiers shared by most tests in this file.
var podID = k8s.PodID("pod-id")
var cName = container.Name("cname")
var cID = container.ID("cid")

// TestLogs streams logs from a single running container, then verifies the
// captured output, the log start time, and that pod events are translated
// into Reconcile() requests for the owning PodLogStream.
func TestLogs(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, cName, "hello world!")

	start := f.clock.Now()

	pb := newPodBuilder(podID).addRunningContainer(cName, cID)
	f.kClient.UpsertPod(pb.toPod())

	pls := plsFromPod("server", pb, start)
	f.Create(pls)

	f.triggerPodEvent(podID)
	f.AssertOutputContains("hello world!")
	f.AssertLogStartTime(start)

	// Check to make sure that we're enqueuing pod changes as Reconcile() calls.
	podNN := types.NamespacedName{Name: string(podID), Namespace: "default"}
	streamNN := types.NamespacedName{Name: fmt.Sprintf("default-%s", podID)}
	assert.Equal(t, []reconcile.Request{
		{NamespacedName: streamNN},
	}, f.plsc.podSource.indexer.EnqueueKey(indexer.Key{Name: podNN, GVK: podGVK}))
}

// TestLogCleanup verifies that deleting a PodLogStream tears down its log
// watches (but intentionally not the shared namespace watch).
func TestLogCleanup(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, cName, "hello world!")

	start := f.clock.Now()
	pb := newPodBuilder(podID).addRunningContainer(cName, cID)
	f.kClient.UpsertPod(pb.toPod())

	pls := plsFromPod("server", pb, start)
	f.Create(pls)

	f.triggerPodEvent(podID)
	f.AssertOutputContains("hello world!")

	f.Delete(pls)
	assert.Len(t, f.plsc.watches, 0)

	// TODO(nick): Currently, namespace watches are never cleaned up,
	// because the user might restart them again.
	assert.Len(t, f.plsc.podSource.watchesByNamespace, 1)
}

// TestLogActions verifies that streamed log lines are dispatched to the
// store as LogActions (consumed via the plmStore fake).
func TestLogActions(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, cName, "hello world!\ngoodbye world!\n")

	pb := newPodBuilder(podID).addRunningContainer(cName, cID)
	f.kClient.UpsertPod(pb.toPod())

	f.Create(plsFromPod("server", pb, time.Time{}))

	f.triggerPodEvent(podID)
	f.ConsumeLogActionsUntil("hello world!")
}

// TestLogsFailed verifies that a log-streaming error is surfaced in both the
// log output and the PodLogStream status, and that the controller retries
// with exponential backoff (2s, then 4s) driven by the fake clock.
func TestLogsFailed(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.ContainerLogsError = fmt.Errorf("my-error")

	pb := newPodBuilder(podID).addRunningContainer(cName, cID)
	f.kClient.UpsertPod(pb.toPod())

	pls := plsFromPod("server", pb, time.Time{})
	f.Create(pls)

	f.AssertOutputContains("Error streaming pod-id logs")
	assert.Contains(t, f.out.String(), "my-error")

	require.Eventually(t,
		func() bool {
			// Check to make sure the status has an error.
			f.MustGet(f.KeyForObject(pls), pls)
			return apicmp.DeepEqual(pls.Status,
				PodLogStreamStatus{
					ContainerStatuses: []ContainerLogStreamStatus{
						{
							Name:  "cname",
							Error: "my-error",
						},
					},
				})
		},
		time.Second, 10*time.Millisecond,
		"Expected error not present on PodLogStreamStatus: %v", pls,
	)

	result := f.MustReconcile(f.KeyForObject(pls))
	assert.Equal(t, 2*time.Second, result.RequeueAfter)

	f.clock.Advance(2 * time.Second)

	assert.Eventually(f.t, func() bool {
		result = f.MustReconcile(f.KeyForObject(pls))
		return result.RequeueAfter == 4*time.Second
	}, time.Second, 5*time.Millisecond, "should re-stream and backoff again")
}

// TestLogsCanceledUnexpectedly verifies that when a log stream ends on its
// own (e.g. the pod restarted), a later pod event plus clock advance causes
// the controller to re-open the stream and pick up the new logs.
func TestLogsCanceledUnexpectedly(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, cName, "hello world!\n")

	pb := newPodBuilder(podID).addRunningContainer(cName, cID)
	f.kClient.UpsertPod(pb.toPod())
	pls := plsFromPod("server", pb, time.Time{})
	f.Create(pls)

	f.AssertOutputContains("hello world!\n")

	// Wait until the previous log stream finishes.
	assert.Eventually(f.t, func() bool {
		f.MustGet(f.KeyForObject(pls), pls)
		statuses := pls.Status.ContainerStatuses
		if len(statuses) != 1 {
			return false
		}
		return !statuses[0].Active
	}, time.Second, 5*time.Millisecond)

	// Set new logs, as if the pod restarted.
	f.kClient.SetLogsForPodContainer(podID, cName, "goodbye world!\n")
	f.triggerPodEvent(podID)
	f.clock.Advance(10 * time.Second)
	f.MustReconcile(types.NamespacedName{Name: pls.Name})
	f.AssertOutputContains("goodbye world!\n")
}

// TestMultiContainerLogs verifies that logs from every container of a pod
// are streamed.
func TestMultiContainerLogs(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, "cont1", "hello world!")
	f.kClient.SetLogsForPodContainer(podID, "cont2", "goodbye world!")

	pb := newPodBuilder(podID).
		addRunningContainer("cont1", "cid1").
		addRunningContainer("cont2", "cid2")
	f.kClient.UpsertPod(pb.toPod())
	f.Create(plsFromPod("server", pb, time.Time{}))

	f.AssertOutputContains("hello world!")
	f.AssertOutputContains("goodbye world!")
}

// TestContainerPrefixes verifies that log lines are prefixed with the
// container name only for multi-container pods.
func TestContainerPrefixes(t *testing.T) {
	f := newPLMFixture(t)

	pID1 := k8s.PodID("pod1")
	cNamePrefix1 := container.Name("yes-prefix-1")
	cNamePrefix2 := container.Name("yes-prefix-2")
	f.kClient.SetLogsForPodContainer(pID1, cNamePrefix1, "hello world!")
	f.kClient.SetLogsForPodContainer(pID1, cNamePrefix2, "goodbye world!")

	pID2 := k8s.PodID("pod2")
	cNameNoPrefix := container.Name("no-prefix")
	f.kClient.SetLogsForPodContainer(pID2, cNameNoPrefix, "hello jupiter!")

	pbMultiC := newPodBuilder(pID1).
		// Pod with multiple containers -- logs should be prefixed with container name
		addRunningContainer(cNamePrefix1, "cid1").
		addRunningContainer(cNamePrefix2, "cid2")
	f.kClient.UpsertPod(pbMultiC.toPod())

	f.Create(plsFromPod("multiContainer", pbMultiC, time.Time{}))

	pbSingleC := newPodBuilder(pID2).
		// Pod with just one container -- logs should NOT be prefixed with container name
		addRunningContainer(cNameNoPrefix, "cid3")
	f.kClient.UpsertPod(pbSingleC.toPod())

	f.Create(plsFromPod("singleContainer", pbSingleC, time.Time{}))

	// Make sure we have expected logs
	f.AssertOutputContains("hello world!")
	f.AssertOutputContains("goodbye world!")
	f.AssertOutputContains("hello jupiter!")

	// Check for un/expected prefixes
	f.AssertOutputContains(cNamePrefix1.String())
	f.AssertOutputContains(cNamePrefix2.String())
	f.AssertOutputDoesNotContain(cNameNoPrefix.String())
}

// TestTerminatedContainerLogs verifies that logs of an already-terminated
// container are streamed once, and never re-streamed after the stream closes.
func TestTerminatedContainerLogs(t *testing.T) {
	f := newPLMFixture(t)

	cName := container.Name("cName")
	pb := newPodBuilder(podID).addTerminatedContainer(cName, "cID")
	f.kClient.UpsertPod(pb.toPod())

	f.kClient.SetLogsForPodContainer(podID, cName, "hello world!")

	f.Create(plsFromPod("server", pb, time.Time{}))

	// Fire OnChange twice, because we used to have a bug where
	// we'd immediately teardown the log watch on the terminated container.
	f.triggerPodEvent(podID)
	f.triggerPodEvent(podID)

	f.AssertOutputContains("hello world!")

	// Make sure that we don't try to re-stream after the terminated container
	// closes the log stream.
	f.kClient.SetLogsForPodContainer(podID, cName, "hello world!\ngoodbye world!\n")

	f.triggerPodEvent(podID)
	f.AssertOutputContains("hello world!")
	f.AssertOutputDoesNotContain("goodbye world!")
}

// TestLogReconnection verifies the stale-stream watchdog: if no log data has
// been read for longer than the reconnect gap, the controller cancels the old
// stream and reconnects with SinceTime adjusted to just after the last read.
// https://github.com/tilt-dev/tilt/issues/3908
func TestLogReconnection(t *testing.T) {
	f := newPLMFixture(t)
	cName := container.Name("cName")
	pb := newPodBuilder(podID).addRunningContainer(cName, "cID")
	f.kClient.UpsertPod(pb.toPod())

	reader, writer := io.Pipe()
	defer func() {
		require.NoError(t, writer.Close())
	}()
	f.kClient.SetLogReaderForPodContainer(podID, cName, reader)

	// Set up fake time
	startTime := f.clock.Now()
	f.Create(plsFromPod("server", pb, startTime))

	_, err := writer.Write([]byte("hello world!"))
	require.NoError(t, err)
	f.AssertOutputContains("hello world!")
	f.AssertLogStartTime(startTime)

	f.clock.Advance(20 * time.Second)
	lastRead := f.clock.Now()
	_, _ = writer.Write([]byte("hello world2!"))
	f.AssertOutputContains("hello world2!")

	// Simulate Kubernetes rotating the logs by creating a new pipe.
	reader2, writer2 := io.Pipe()
	defer func() {
		require.NoError(t, writer2.Close())
	}()
	f.kClient.SetLogReaderForPodContainer(podID, cName, reader2)
	go func() {
		_, _ = writer2.Write([]byte("goodbye world!"))
	}()
	// The rotated logs must not appear until the reconnect threshold passes.
	f.AssertOutputDoesNotContain("goodbye world!")

	f.clock.Advance(5 * time.Second)
	f.AssertOutputDoesNotContain("goodbye world!")

	f.clock.Advance(5 * time.Second)
	f.AssertOutputDoesNotContain("goodbye world!")
	f.AssertLogStartTime(startTime)

	// simulate 15s since we last read a log; this triggers a reconnect
	f.clock.Advance(15 * time.Second)
	assert.Eventually(t, func() bool {
		return f.kClient.LastPodLogContext.Err() != nil
	}, time.Second, time.Millisecond)
	require.NoError(t, writer.Close())

	f.AssertOutputContains("goodbye world!")

	// Make sure the start time was adjusted for when the last read happened.
	f.AssertLogStartTime(lastRead.Add(podLogReconnectGap))
}

// TestInitContainerLogs verifies that init container logs are streamed with
// the init container's name prefix, while a pod whose only app container is
// running gets no prefix on its own logs.
func TestInitContainerLogs(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, "cont1", "hello world!")

	cNameInit := container.Name("cNameInit")
	cNameNormal := container.Name("cNameNormal")
	pb := newPodBuilder(podID).
		addTerminatedInitContainer(cNameInit, "cID-init").
		addRunningContainer(cNameNormal, "cID-normal")
	f.kClient.UpsertPod(pb.toPod())

	f.kClient.SetLogsForPodContainer(podID, cNameInit, "init world!")
	f.kClient.SetLogsForPodContainer(podID, cNameNormal, "hello world!")

	f.Create(plsFromPod("server", pb, time.Time{}))

	f.AssertOutputContains(cNameInit.String())
	f.AssertOutputContains("init world!")
	f.AssertOutputDoesNotContain(cNameNormal.String())
	f.AssertOutputContains("hello world!")
}

// TestIgnoredContainerLogs verifies that containers listed in
// Spec.IgnoreContainers (here, the Istio sidecars) produce no log output.
func TestIgnoredContainerLogs(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, "cont1", "hello world!")

	istioInit := container.IstioInitContainerName
	istioSidecar := container.IstioSidecarContainerName
	cNormal := container.Name("cNameNormal")
	pb := newPodBuilder(podID).
		addTerminatedInitContainer(istioInit, "cID-init").
		addRunningContainer(istioSidecar, "cID-sidecar").
		addRunningContainer(cNormal, "cID-normal")
	f.kClient.UpsertPod(pb.toPod())

	f.kClient.SetLogsForPodContainer(podID, istioInit, "init istio!")
	f.kClient.SetLogsForPodContainer(podID, istioSidecar, "hello istio!")
	f.kClient.SetLogsForPodContainer(podID, cNormal, "hello world!")

	pls := plsFromPod("server", pb, time.Time{})
	pls.Spec.IgnoreContainers = []string{string(istioInit), string(istioSidecar)}
	f.Create(pls)

	f.AssertOutputDoesNotContain("istio")
	f.AssertOutputContains("hello world!")
}

// Our old Fake Kubernetes client used to interact badly
// with the pod log stream reconciler, leading to an infinite
// loop in tests.
func TestInfiniteLoop(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, "cont1", "hello world!")

	pb := newPodBuilder(podID).
		addRunningContainer("cNameNormal", "cID-normal")
	f.kClient.UpsertPod(pb.toPod())

	pls := plsFromPod("server", pb, time.Time{})
	f.Create(pls)

	nn := types.NamespacedName{Name: pls.Name}
	f.MustReconcile(nn)

	// Make sure this goes into an active state and stays there.
	assert.Eventually(t, func() bool {
		var pls v1alpha1.PodLogStream
		f.MustGet(nn, &pls)
		return len(pls.Status.ContainerStatuses) > 0 && pls.Status.ContainerStatuses[0].Active
	}, 200*time.Millisecond, 10*time.Millisecond)

	assert.Never(t, func() bool {
		var pls v1alpha1.PodLogStream
		f.MustGet(nn, &pls)
		return len(pls.Status.ContainerStatuses) == 0 || !pls.Status.ContainerStatuses[0].Active
	}, 200*time.Millisecond, 10*time.Millisecond)

	// Closing the pipe with an error should deactivate the stream and record
	// the error on the container status.
	_ = f.kClient.LastPodLogPipeWriter.CloseWithError(fmt.Errorf("manually closed"))

	assert.Eventually(t, func() bool {
		var pls v1alpha1.PodLogStream
		f.MustGet(nn, &pls)
		if len(pls.Status.ContainerStatuses) == 0 {
			return false
		}
		cst := pls.Status.ContainerStatuses[0]
		return !cst.Active && strings.Contains(cst.Error, "manually closed")
	}, 200*time.Millisecond, 10*time.Millisecond)
}

// TestReconcilerIndexing verifies that a Cluster object change enqueues
// reconcile requests for PodLogStreams that reference that cluster.
func TestReconcilerIndexing(t *testing.T) {
	f := newPLMFixture(t)

	pls := plsFromPod("server", newPodBuilder(podID), f.clock.Now())
	pls.Namespace = "some-ns"
	pls.Spec.Cluster = "my-cluster"
	f.Create(pls)

	ctx := context.Background()
	reqs := f.plsc.indexer.Enqueue(ctx, &v1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "my-cluster"},
	})
	assert.ElementsMatch(t, []reconcile.Request{
		{NamespacedName: types.NamespacedName{Namespace: "some-ns", Name: "default-pod-id"}},
	}, reqs)
}

// TestDeletionTimestamp verifies that once a pod has a deletion timestamp,
// its log stream drains, goes inactive, and is marked permanently closed.
func TestDeletionTimestamp(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, cName, "hello world!")

	start := f.clock.Now()

	pb := newPodBuilder(podID).addRunningContainer(cName, cID).addDeletionTimestamp()
	f.kClient.UpsertPod(pb.toPod())

	pls := plsFromPod("server", pb, start)
	f.Create(pls)

	f.triggerPodEvent(podID)

	nn := types.NamespacedName{Name: pls.Name}
	f.MustReconcile(nn)

	f.AssertOutputContains("hello world!")

	assert.Eventually(f.t, func() bool {
		f.Get(nn, pls)
		return len(pls.Status.ContainerStatuses) == 1 && !pls.Status.ContainerStatuses[0].Active
	}, time.Second, 5*time.Millisecond, "should stream then stop")

	// No log streams should be active.
	assert.Equal(t, pls.Status, v1alpha1.PodLogStreamStatus{
		ContainerStatuses: []v1alpha1.ContainerLogStreamStatus{
			v1alpha1.ContainerLogStreamStatus{Name: "cname"},
		},
	})

	// The cname stream is closed forever.
	assert.Len(t, f.plsc.hasClosedStream, 1)
}

// TestMissingPod verifies the retry path when the PodLogStream exists before
// its pod: the status reports the missing pod, reconciles back off (1s, 2s),
// and streaming starts as soon as the pod appears.
func TestMissingPod(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, cName, "hello world!")

	start := f.clock.Now()

	pb := newPodBuilder(podID).addRunningContainer(cName, cID)
	pls := plsFromPod("server", pb, start)
	nn := types.NamespacedName{Name: pls.Name}
	result := f.Create(pls)
	assert.Equal(t, time.Second, result.RequeueAfter)

	result = f.MustReconcile(nn)
	assert.Equal(t, 2*time.Second, result.RequeueAfter)

	f.Get(nn, pls)
	assert.Equal(t, "pod not found: default/pod-id", pls.Status.Error)

	f.kClient.UpsertPod(pb.toPod())

	result = f.MustReconcile(nn)
	assert.Equal(t, time.Duration(0), result.RequeueAfter)

	f.AssertOutputContains("hello world!")
	f.AssertLogStartTime(start)
}

// TestFailedToCreateLogWatcher verifies that the well-known kubelet
// "failed to create fsnotify watcher" log line is detected and annotated
// with a pointer to the inotify-limits documentation.
func TestFailedToCreateLogWatcher(t *testing.T) {
	f := newPLMFixture(t)

	f.kClient.SetLogsForPodContainer(podID, cName,
		"listening on 8080\nfailed to create fsnotify watcher: too many open files")

	start := f.clock.Now()

	pb := newPodBuilder(podID).addRunningContainer(cName, cID)
	f.kClient.UpsertPod(pb.toPod())

	pls := plsFromPod("server", pb, start)
	f.Create(pls)

	f.triggerPodEvent(podID)
	f.AssertOutputContains(`listening on 8080
failed to create fsnotify watcher: too many open files
Error streaming pod-id logs: failed to create fsnotify watcher: too many open files. Consider adjusting inotify limits: https://kind.sigs.k8s.io/docs/user/known-issues/#pod-errors-due-to-too-many-open-files
`)
}

// plmStore is a test store that forwards every dispatched LogAction's
// message into a thread-safe buffer so tests can assert on log output.
type plmStore struct {
	t testing.TB
	*store.TestingStore
	out *bufsync.ThreadSafeBuffer
}

// newPLMStore creates a plmStore writing log output into out.
func newPLMStore(t testing.TB, out *bufsync.ThreadSafeBuffer) *plmStore {
	return &plmStore{
		t:            t,
		TestingStore: store.NewTestingStore(),
		out:          out,
	}
}

// Dispatch expects every action to be a store.LogAction and appends its
// message to the output buffer; any other action type fails the test.
func (s *plmStore) Dispatch(action store.Action) {
	event, ok := action.(store.LogAction)
	if !ok {
		s.t.Errorf("Expected action type LogAction. Actual: %T", action)
	}

	_, err := s.out.Write(event.Message())
	if err != nil {
		fmt.Printf("error writing event: %v\n", err)
	}
}

// plmFixture wires a podlogstream Controller to a fake Kubernetes client,
// a fake clock, and a capturing store for use in these tests.
type plmFixture struct {
	*fake.ControllerFixture
	t       testing.TB
	ctx     context.Context
	kClient *k8s.FakeK8sClient
	plsc    *Controller
	out     *bufsync.ThreadSafeBuffer
	store   *plmStore
	clock   *clockwork.FakeClock
}

// newPLMFixture builds the full test fixture: fake k8s client, fake clock,
// log-capturing store/logger, pod source, and the Controller under test.
func newPLMFixture(t testing.TB) *plmFixture {
	kClient := k8s.NewFakeK8sClient(t)

	out := bufsync.NewThreadSafeBuffer()
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	ctx = logger.WithLogger(ctx, logger.NewTestLogger(out))

	cfb := fake.NewControllerFixtureBuilder(t)

	clock := clockwork.NewFakeClock()
	st := newPLMStore(t, out)
	podSource := NewPodSource(ctx, kClient, cfb.Client.Scheme(), clock)
	plsc := NewController(ctx, cfb.Client, cfb.Scheme(), st, kClient, podSource, clock)

	return &plmFixture{
		t:                 t,
		ControllerFixture: cfb.WithRequeuer(plsc.podSource).Build(plsc),
		kClient:           kClient,
		plsc:              plsc,
		ctx:               ctx,
		out:               out,
		store:             st,
		clock:             clock,
	}
}

// triggerPodEvent simulates a pod change event by enqueuing the pod's key
// through the pod source indexer and reconciling each resulting request.
func (f *plmFixture) triggerPodEvent(podID k8s.PodID) {
	podNN := types.NamespacedName{Name: string(podID), Namespace: "default"}
	reqs := f.plsc.podSource.indexer.EnqueueKey(indexer.Key{Name: podNN, GVK: podGVK})
	for _, req := range reqs {
		_, err := f.plsc.Reconcile(f.ctx, req)
		assert.NoError(f.t, err)
	}
}

// ConsumeLogActionsUntil polls the captured output (under the store lock)
// until it contains expected, failing the test after a one-second timeout.
func (f *plmFixture) ConsumeLogActionsUntil(expected string) {
	start := time.Now()
	for time.Since(start) < time.Second {
		f.store.RLockState()
		done := strings.Contains(f.out.String(), expected)
		f.store.RUnlockState()

		if done {
			return
		}

		time.Sleep(10 * time.Millisecond)
	}

	f.t.Fatalf("Timeout. Collected output: %s", f.out.String())
}

// AssertOutputContains waits up to one second for s to appear in the
// captured log output.
func (f *plmFixture) AssertOutputContains(s string) {
	f.t.Helper()
	f.out.AssertEventuallyContains(f.t, s, time.Second)
}

// AssertOutputDoesNotContain sleeps briefly to let any in-flight streaming
// land, then asserts s is absent from the captured output.
func (f *plmFixture) AssertOutputDoesNotContain(s string) {
	time.Sleep(10 * time.Millisecond)
	assert.NotContains(f.t, f.out.String(), s)
}

// AssertLogStartTime asserts that the fake client's last log request used
// the given start time.
func (f *plmFixture) AssertLogStartTime(t time.Time) {
	f.t.Helper()

	// Truncate the time to match the behavior of metav1.Time
	timecmp.AssertTimeEqual(f.t, t.Truncate(time.Second), f.kClient.LastPodLogStartTime)
}

// podBuilder incrementally constructs a v1.Pod for tests.
type podBuilder v1.Pod

// newPodBuilder starts a pod named after id in the "default" namespace.
func newPodBuilder(id k8s.PodID) *podBuilder {
	return (*podBuilder)(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      string(id),
			Namespace: "default",
		},
	})
}

// addDeletionTimestamp marks the pod as being deleted (timestamp = now).
func (pb *podBuilder) addDeletionTimestamp() *podBuilder {
	now := metav1.Now()
	pb.ObjectMeta.DeletionTimestamp = &now
	return pb
}

// addRunningContainer appends a container to the pod spec plus a matching
// Running container status.
func (pb *podBuilder) addRunningContainer(name container.Name, id container.ID) *podBuilder {
	pb.Spec.Containers = append(pb.Spec.Containers, v1.Container{
		Name: string(name),
	})
	pb.Status.ContainerStatuses = append(pb.Status.ContainerStatuses, v1.ContainerStatus{
		Name:        string(name),
		ContainerID: fmt.Sprintf("containerd://%s", id),
		Image:       fmt.Sprintf("image-%s", strings.ToLower(string(name))),
		ImageID:     fmt.Sprintf("image-%s", strings.ToLower(string(name))),
		Ready:       true,
		State: v1.ContainerState{
			Running: &v1.ContainerStateRunning{
				StartedAt: metav1.Now(),
			},
		},
	})
	return pb
}

// addRunningInitContainer appends an init container to the pod spec plus a
// matching Running init-container status.
func (pb *podBuilder) addRunningInitContainer(name container.Name, id container.ID) *podBuilder {
	pb.Spec.InitContainers = append(pb.Spec.InitContainers, v1.Container{
		Name: string(name),
	})
	pb.Status.InitContainerStatuses = append(pb.Status.InitContainerStatuses, v1.ContainerStatus{
		Name:        string(name),
		ContainerID: fmt.Sprintf("containerd://%s", id),
		Image:       fmt.Sprintf("image-%s", strings.ToLower(string(name))),
		ImageID:     fmt.Sprintf("image-%s", strings.ToLower(string(name))),
		Ready:       true,
		State: v1.ContainerState{
			Running: &v1.ContainerStateRunning{
				StartedAt: metav1.Now(),
			},
		},
	})
	return pb
}

// addTerminatedContainer adds a container, then flips its status from
// Running to Terminated.
func (pb *podBuilder) addTerminatedContainer(name container.Name, id container.ID) *podBuilder {
	pb.addRunningContainer(name, id)
	statuses := pb.Status.ContainerStatuses
	statuses[len(statuses)-1].State.Running = nil
	statuses[len(statuses)-1].State.Terminated = &v1.ContainerStateTerminated{
		StartedAt: metav1.Now(),
	}
	return pb
}

// addTerminatedInitContainer adds an init container, then flips its status
// from Running to Terminated.
func (pb *podBuilder) addTerminatedInitContainer(name container.Name, id container.ID) *podBuilder {
	pb.addRunningInitContainer(name, id)
	statuses := pb.Status.InitContainerStatuses
	statuses[len(statuses)-1].State.Running = nil
	statuses[len(statuses)-1].State.Terminated = &v1.ContainerStateTerminated{
		StartedAt: metav1.Now(),
	}
	return pb
}

// toPod returns the built pod as a *v1.Pod.
func (pb *podBuilder) toPod() *v1.Pod {
	return (*v1.Pod)(pb)
}

func plsFromPod(mn model.ManifestName, pb *podBuilder, start time.Time) *v1alpha1.PodLogStream { 698 var sinceTime *metav1.Time 699 if !start.IsZero() { 700 t := apis.NewTime(start) 701 sinceTime = &t 702 } 703 return &v1alpha1.PodLogStream{ 704 ObjectMeta: metav1.ObjectMeta{ 705 Name: fmt.Sprintf("%s-%s", pb.Namespace, pb.Name), 706 Annotations: map[string]string{ 707 v1alpha1.AnnotationManifest: string(mn), 708 v1alpha1.AnnotationSpanID: string(k8sconv.SpanIDForPod(mn, k8s.PodID(pb.Name))), 709 }, 710 }, 711 Spec: PodLogStreamSpec{ 712 Namespace: pb.Namespace, 713 Pod: pb.Name, 714 SinceTime: sinceTime, 715 }, 716 } 717 }