github.com/mirantis/virtlet@v1.5.2-0.20191204181327-1659b8a48e9b/pkg/libvirttools/virtualization_test.go (about) 1 /* 2 Copyright 2016-2019 Mirantis 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package libvirttools 18 19 import ( 20 "encoding/json" 21 "fmt" 22 "io/ioutil" 23 "os" 24 "path/filepath" 25 "strconv" 26 "testing" 27 "time" 28 29 "github.com/jonboulle/clockwork" 30 v1 "k8s.io/api/core/v1" 31 meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 32 "k8s.io/apimachinery/pkg/runtime" 33 fakekube "k8s.io/client-go/kubernetes/fake" 34 35 "github.com/Mirantis/virtlet/pkg/flexvolume" 36 "github.com/Mirantis/virtlet/pkg/fs" 37 fakefs "github.com/Mirantis/virtlet/pkg/fs/fake" 38 "github.com/Mirantis/virtlet/pkg/metadata" 39 fakemeta "github.com/Mirantis/virtlet/pkg/metadata/fake" 40 "github.com/Mirantis/virtlet/pkg/metadata/types" 41 "github.com/Mirantis/virtlet/pkg/utils" 42 fakeutils "github.com/Mirantis/virtlet/pkg/utils/fake" 43 testutils "github.com/Mirantis/virtlet/pkg/utils/testing" 44 "github.com/Mirantis/virtlet/pkg/virt/fake" 45 "github.com/Mirantis/virtlet/tests/gm" 46 ) 47 48 const ( 49 fakeImageName = "fake/image1" 50 fakeCNIConfig = `{"noCniForNow":true}` 51 fakeUUID = "abb67e3c-71b3-4ddd-5505-8c4215d5c4eb" 52 fakeContainerName = "container1" 53 fakeContainerAttempt = 42 54 stopContainerTimeout = 30 * time.Second 55 ) 56 57 type containerTester struct { 58 t *testing.T 59 clock clockwork.FakeClock 60 tmpDir string 61 kubeletRootDir string 62 virtTool *VirtualizationTool 63 rec *testutils.TopLevelRecorder 64 domainConn *fake.FakeDomainConnection 65 storageConn *fake.FakeStorageConnection 66 metadataStore metadata.Store 67 } 68 69 func newContainerTester(t *testing.T, rec *testutils.TopLevelRecorder, cmds []fakeutils.CmdSpec, files map[string]string) *containerTester { 70 ct := &containerTester{ 71 t: t, 72 clock: clockwork.NewFakeClockAt(time.Date(2017, 5, 30, 20, 19, 0, 0, time.UTC)), 73 } 74 75 var err error 76 ct.tmpDir, err = ioutil.TempDir("", "virtualization-test-") 77 if err != nil { 78 t.Fatalf("TempDir(): %v", err) 79 } 80 81 // __config__ is a hint for fake libvirt domain to fix the path so it becomes non-volatile 82 SetConfigIsoDir(filepath.Join(ct.tmpDir, "__config__")) 83 84 ct.rec = rec 85 ct.domainConn = fake.NewFakeDomainConnection(ct.rec.Child("domain conn")) 86 ct.storageConn = fake.NewFakeStorageConnection(ct.rec.Child("storage")) 87 88 ct.metadataStore, err = metadata.NewFakeStore() 89 if err != nil { 90 t.Fatalf("Failed to create fake bolt client: %v", err) 91 } 92 93 imageManager := newFakeImageManager(ct.rec) 94 ct.kubeletRootDir = filepath.Join(ct.tmpDir, "__fs__/kubelet-root") 95 mountDir := filepath.Join(ct.tmpDir, "__fs__/mounts") 96 virtConfig := VirtualizationConfig{ 97 VolumePoolName: "volumes", 98 RawDevices: []string{"loop*"}, 99 KubeletRootDir: ct.kubeletRootDir, 100 StreamerSocketPath: "/var/lib/libvirt/streamer.sock", 101 SharedFilesystemPath: mountDir, 102 } 103 fakeCommander := fakeutils.NewCommander(rec, cmds) 104 fakeCommander.ReplaceTempPath("__pods__", "/fakedev") 105 106 fs := fakefs.NewFakeFileSystem(t, rec, mountDir, files) 107 108 ct.virtTool = NewVirtualizationTool( 109 ct.domainConn, ct.storageConn, imageManager, ct.metadataStore, 110 GetDefaultVolumeSource(), virtConfig, fs, 111 fakeCommander) 112 ct.virtTool.SetClock(ct.clock) 113 114 return ct 115 } 116 117 func (ct *containerTester) setPodSandbox(config *types.PodSandboxConfig) { 118 psi, _ := metadata.NewPodSandboxInfo(config, nil, types.PodSandboxState_SANDBOX_READY, ct.clock) 119 sandbox := ct.metadataStore.PodSandbox(config.Uid) 120 err := sandbox.Save(func(c *types.PodSandboxInfo) (*types.PodSandboxInfo, error) { 121 return psi, nil 122 }) 123 if err != nil { 124 ct.t.Fatalf("Failed to store pod sandbox: %v", err) 125 } 126 } 127 128 func (ct *containerTester) teardown() { 129 os.RemoveAll(ct.tmpDir) 130 } 131 132 func (ct *containerTester) createContainer(sandbox *types.PodSandboxConfig, mounts []types.VMMount, volDevs []types.VMVolumeDevice) string { 133 vmConfig := &types.VMConfig{ 134 PodSandboxID: sandbox.Uid, 135 PodName: sandbox.Name, 136 PodNamespace: sandbox.Namespace, 137 Name: fakeContainerName, 138 Image: fakeImageName, 139 Attempt: fakeContainerAttempt, 140 PodAnnotations: sandbox.Annotations, 141 ContainerAnnotations: map[string]string{"foo": "bar"}, 142 Mounts: mounts, 143 VolumeDevices: volDevs, 144 LogDirectory: fmt.Sprintf("/var/log/pods/%s", sandbox.Uid), 145 LogPath: fmt.Sprintf("%s_%d.log", fakeContainerName, fakeContainerAttempt), 146 } 147 containerID, err := ct.virtTool.CreateContainer(vmConfig, "/tmp/fakenetns") 148 if err != nil { 149 ct.t.Fatalf("CreateContainer: %v", err) 150 } 151 return containerID 152 } 153 154 func (ct *containerTester) listContainers(filter *types.ContainerFilter) []*types.ContainerInfo { 155 containers, err := ct.virtTool.ListContainers(filter) 156 if err != nil { 157 ct.t.Fatalf("ListContainers() failed: %v", err) 158 } 159 return containers 160 } 161 162 func (ct *containerTester) containerInfo(containerID string) *types.ContainerInfo { 163 status, err := ct.virtTool.ContainerInfo(containerID) 164 if err != nil { 165 ct.t.Errorf("ContainerInfo(): %v", err) 166 } 167 return status 168 } 169 170 func (ct *containerTester) startContainer(containerID string) { 171 if err := ct.virtTool.StartContainer(containerID); err != nil { 172 ct.t.Fatalf("StartContainer failed for container %q: %v", containerID, err) 173 } 174 } 175 176 func (ct *containerTester) stopContainer(containerID string) { 177 if err := ct.virtTool.StopContainer(containerID, stopContainerTimeout); err != nil { 178 ct.t.Fatalf("StopContainer failed for container %q: %v", containerID, err) 179 } 180 } 181 182 func (ct *containerTester) removeContainer(containerID string) { 183 if err := ct.virtTool.RemoveContainer(containerID); err != nil { 184 ct.t.Fatalf("RemoveContainer failed for container %q: %v", containerID, err) 185 } 186 } 187 188 func (ct *containerTester) verifyContainerRootfsExists(containerInfo *types.ContainerInfo) bool { 189 storagePool, err := ct.storageConn.LookupStoragePoolByName("volumes") 190 if err != nil { 191 ct.t.Fatalf("Expected to found 'volumes' storage pool but failed with: %v", err) 192 } 193 // TODO: this is third place where rootfs volume name is calculated 194 // so there should be a func which will do it in consistent way there, 195 // in virtlet_root_volumesource.go and in virtualization.go 196 _, err = storagePool.LookupVolumeByName("virtlet_root_" + containerInfo.Config.PodSandboxID) 197 return err == nil 198 } 199 200 func TestContainerLifecycle(t *testing.T) { 201 ct := newContainerTester(t, testutils.NewToplevelRecorder(), nil, nil) 202 defer ct.teardown() 203 204 sandbox := fakemeta.GetSandboxes(1)[0] 205 ct.setPodSandbox(sandbox) 206 207 containers := ct.listContainers(nil) 208 if len(containers) != 0 { 209 t.Errorf("Unexpected containers when no containers are started: %#v", containers) 210 } 211 212 containerID := ct.createContainer(sandbox, nil, nil) 213 214 containers = ct.listContainers(nil) 215 if len(containers) != 1 { 216 t.Errorf("Expected single container to be started, but got: %#v", containers) 217 } 218 container := containers[0] 219 if container.Id != containerID { 220 t.Errorf("Bad container id in response: %q instead of %q", containers[0].Id, containerID) 221 } 222 if container.State != types.ContainerState_CONTAINER_CREATED { 223 t.Errorf("Bad container state: %v instead of %v", containers[0].State, types.ContainerState_CONTAINER_CREATED) 224 } 225 if container.Config.Name != fakeContainerName { 226 t.Errorf("Bad container name: %q instead of %q", containers[0].Config.Name, fakeContainerName) 227 } 228 if container.Config.Attempt != fakeContainerAttempt { 229 t.Errorf("Bad container attempt: %d instead of %d", containers[0].Config.Attempt, fakeContainerAttempt) 230 } 231 if container.Config.ContainerLabels[KubernetesContainerNameLabel] != fakeContainerName { 232 t.Errorf("Bad container name label: %q instead of %q", containers[0].Config.ContainerLabels[KubernetesContainerNameLabel], fakeContainerName) 233 } 234 if container.Config.ContainerAnnotations["foo"] != "bar" { 235 t.Errorf("Bad container annotation value: %q instead of %q", containers[0].Config.ContainerAnnotations["foo"], "bar") 236 } 237 ct.rec.Rec("container list after the container is created", containers) 238 239 ct.clock.Advance(1 * time.Second) 240 ct.startContainer(containerID) 241 242 container = ct.containerInfo(containerID) 243 if container.State != types.ContainerState_CONTAINER_RUNNING { 244 t.Errorf("Bad container state: %v instead of %v", container.State, types.ContainerState_CONTAINER_RUNNING) 245 } 246 ct.rec.Rec("container info after the container is started", container) 247 248 ct.stopContainer(containerID) 249 250 container = ct.containerInfo(containerID) 251 if container.State != types.ContainerState_CONTAINER_EXITED { 252 t.Errorf("Bad container state: %v instead of %v", container.State, types.ContainerState_CONTAINER_EXITED) 253 } 254 if container.Config.Name != fakeContainerName { 255 t.Errorf("Bad container name: %q instead of %q", container.Config.Name, fakeContainerName) 256 } 257 if container.Config.Attempt != fakeContainerAttempt { 258 t.Errorf("Bad container attempt: %d instead of %d", container.Config.Attempt, fakeContainerAttempt) 259 } 260 if container.Config.ContainerLabels[KubernetesContainerNameLabel] != fakeContainerName { 261 t.Errorf("Bad container name label: %q instead of %q", containers[0].Config.ContainerLabels[KubernetesContainerNameLabel], fakeContainerName) 262 } 263 if container.Config.ContainerAnnotations["foo"] != "bar" { 264 t.Errorf("Bad container annotation value: %q instead of %q", container.Config.ContainerAnnotations["foo"], "bar") 265 } 266 ct.rec.Rec("container info after the container is stopped", container) 267 268 ct.removeContainer(containerID) 269 270 containers = ct.listContainers(nil) 271 if len(containers) != 0 { 272 t.Errorf("Unexpected containers when no containers are started: %#v", containers) 273 } 274 275 if ct.verifyContainerRootfsExists(container) { 276 t.Errorf("Rootfs volume was not deleted for the container: %#v", container) 277 } 278 279 gm.Verify(t, gm.NewYamlVerifier(ct.rec.Content())) 280 } 281 282 func TestDomainForcedShutdown(t *testing.T) { 283 ct := newContainerTester(t, testutils.NewToplevelRecorder(), nil, nil) 284 defer ct.teardown() 285 286 sandbox := fakemeta.GetSandboxes(1)[0] 287 ct.setPodSandbox(sandbox) 288 289 containerID := ct.createContainer(sandbox, nil, nil) 290 ct.clock.Advance(1 * time.Second) 291 ct.startContainer(containerID) 292 293 ct.domainConn.SetIgnoreShutdown(true) 294 go func() { 295 // record a couple of ignored shutdown attempts before container destruction 296 ct.clock.BlockUntil(1) 297 ct.clock.Advance(6 * time.Second) 298 ct.clock.BlockUntil(1) 299 ct.clock.Advance(6 * time.Second) 300 ct.clock.BlockUntil(1) 301 ct.clock.Advance(30 * time.Second) 302 }() 303 304 ct.rec.Rec("invoking StopContainer()", nil) 305 ct.stopContainer(containerID) 306 container := ct.containerInfo(containerID) 307 if container.State != types.ContainerState_CONTAINER_EXITED { 308 t.Errorf("Bad container state: %v instead of %v", container.State, types.ContainerState_CONTAINER_EXITED) 309 } 310 ct.rec.Rec("container info after the container is stopped", container) 311 312 ct.rec.Rec("invoking RemoveContainer()", nil) 313 ct.removeContainer(containerID) 314 gm.Verify(t, gm.NewYamlVerifier(ct.rec.Content())) 315 } 316 317 func TestDoubleStartError(t *testing.T) { 318 ct := newContainerTester(t, testutils.NewToplevelRecorder(), nil, nil) 319 defer ct.teardown() 320 321 sandbox := fakemeta.GetSandboxes(1)[0] 322 ct.setPodSandbox(sandbox) 323 324 containerID := ct.createContainer(sandbox, nil, nil) 325 ct.clock.Advance(1 * time.Second) 326 ct.startContainer(containerID) 327 if err := ct.virtTool.StartContainer(containerID); err == nil { 328 t.Errorf("2nd StartContainer() didn't produce an error") 329 } 330 } 331 332 type volMount struct { 333 name string 334 containerPath string 335 podSubpath string 336 } 337 338 type volDevice struct { 339 name string 340 devicePath string 341 size int 342 } 343 344 func TestDomainDefinitions(t *testing.T) { 345 flexVolumeDriver := flexvolume.NewDriver(func() string { 346 // note that this is only good for just one flexvolume 347 return fakeUUID 348 }, fs.NullFileSystem) 349 for _, tc := range []struct { 350 name string 351 annotations map[string]string 352 flexVolumes map[string]map[string]interface{} 353 mounts []volMount 354 volDevs []volDevice 355 cmds []fakeutils.CmdSpec 356 objects []runtime.Object 357 }{ 358 { 359 name: "plain domain", 360 }, 361 { 362 name: "system UUID", 363 annotations: map[string]string{ 364 "VirtletSystemUUID": "53008994-44c0-4017-ad44-9c49758083da", 365 }, 366 }, 367 { 368 name: "raw devices", 369 flexVolumes: map[string]map[string]interface{}{ 370 "raw": { 371 "type": "raw", 372 // FIXME: here we depend upon the fact that /dev/loop0 373 // indeed exists in the build container. But we shouldn't. 374 "path": "/dev/loop0", 375 }, 376 }, 377 }, 378 { 379 name: "volumes", 380 flexVolumes: map[string]map[string]interface{}{ 381 "vol1": { 382 "type": "qcow2", 383 }, 384 "vol2": { 385 "type": "qcow2", 386 "capacity": "2MB", 387 }, 388 "vol3": { 389 "type": "qcow2", 390 }, 391 }, 392 }, 393 { 394 name: "vcpu count", 395 annotations: map[string]string{ 396 "VirtletVCPUCount": "4", 397 }, 398 }, 399 { 400 name: "ceph flexvolume", 401 flexVolumes: map[string]map[string]interface{}{ 402 "ceph": { 403 "type": "ceph", 404 "monitor": "127.0.0.1:6789", 405 "pool": "libvirt-pool", 406 "volume": "rbd-test-image", 407 "secret": "Zm9vYmFyCg==", 408 "user": "libvirt", 409 }, 410 }, 411 mounts: []volMount{ 412 { 413 name: "ceph", 414 containerPath: "/var/lib/whatever", 415 podSubpath: "volumes/virtlet~flexvolume_driver", 416 }, 417 }, 418 }, 419 { 420 name: "raw block volume", 421 volDevs: []volDevice{ 422 { 423 name: "testdev", 424 devicePath: "/dev/tst", 425 }, 426 }, 427 }, 428 { 429 name: "cloud-init", 430 annotations: map[string]string{ 431 "VirtletSSHKeys": "key1\nkey2", 432 }, 433 }, 434 { 435 name: "cloud-init with user data", 436 annotations: map[string]string{ 437 "VirtletSSHKeys": "key1\nkey2", 438 "VirtletCloudInitUserData": ` 439 users: 440 - name: cloudy`, 441 }, 442 }, 443 { 444 name: "virtio disk driver", 445 annotations: map[string]string{ 446 "VirtletDiskDriver": "virtio", 447 }, 448 }, 449 { 450 name: "persistent rootfs", 451 volDevs: []volDevice{ 452 { 453 name: "root", 454 devicePath: "/", 455 size: 512000, 456 }, 457 }, 458 cmds: []fakeutils.CmdSpec{ 459 { 460 Match: "blockdev --getsz", 461 Stdout: "1000", 462 }, 463 { 464 Match: "qemu-img convert", 465 }, 466 { 467 Match: "dmsetup create", 468 }, 469 { 470 Match: "dmsetup remove", 471 }, 472 }, 473 }, 474 { 475 name: "9pfs volume", 476 mounts: []volMount{ 477 { 478 name: "9pfs-vol", 479 containerPath: "/var/lib/foobar", 480 podSubpath: "volumes/kubernetes.io~rbd", 481 }, 482 }, 483 }, 484 { 485 name: "file injection", 486 annotations: map[string]string{ 487 "VirtletFilesFromDataSource": "secret/data", 488 }, 489 objects: []runtime.Object{ 490 &v1.Secret{ 491 ObjectMeta: meta_v1.ObjectMeta{ 492 Name: "data", 493 Namespace: "default", 494 }, 495 Data: map[string][]byte{ 496 "path_to_file_path": []byte("/path/to_file"), 497 "path_to_file": []byte("Y29udGVudA=="), 498 }, 499 }, 500 }, 501 }, 502 { 503 name: "file injection on persistent rootfs", 504 annotations: map[string]string{ 505 "VirtletFilesFromDataSource": "secret/data", 506 }, 507 volDevs: []volDevice{ 508 { 509 name: "root", 510 devicePath: "/", 511 size: 512000, 512 }, 513 }, 514 objects: []runtime.Object{ 515 &v1.Secret{ 516 ObjectMeta: meta_v1.ObjectMeta{ 517 Name: "data", 518 Namespace: "default", 519 }, 520 Data: map[string][]byte{ 521 "path_to_file_path": []byte("/path/to_file"), 522 "path_to_file": []byte("Y29udGVudA=="), 523 }, 524 }, 525 }, 526 cmds: []fakeutils.CmdSpec{ 527 { 528 Match: "blockdev --getsz", 529 Stdout: "1000", 530 }, 531 { 532 Match: "qemu-img convert", 533 }, 534 { 535 Match: "dmsetup create", 536 }, 537 { 538 Match: "dmsetup remove", 539 }, 540 }, 541 }, 542 // TODO: add test cases for rootfs / persistent rootfs file injection 543 } { 544 t.Run(tc.name, func(t *testing.T) { 545 rec := testutils.NewToplevelRecorder() 546 547 ct := newContainerTester(t, rec, tc.cmds, nil) 548 defer ct.teardown() 549 550 sandbox := fakemeta.GetSandboxes(1)[0] 551 sandbox.Annotations = tc.annotations 552 ct.setPodSandbox(sandbox) 553 554 for name, def := range tc.flexVolumes { 555 targetDir := filepath.Join(ct.kubeletRootDir, sandbox.Uid, "volumes/virtlet~flexvolume_driver", name) 556 resultStr := flexVolumeDriver.Run([]string{"mount", targetDir, utils.ToJSON(def)}) 557 var r map[string]interface{} 558 if err := json.Unmarshal([]byte(resultStr), &r); err != nil { 559 t.Errorf("failed to unmarshal flexvolume definition") 560 continue 561 } 562 if r["status"] != "Success" { 563 t.Errorf("mounting flexvolume %q failed: %s", name, r["message"]) 564 } 565 } 566 567 var mounts []types.VMMount 568 for _, m := range tc.mounts { 569 mounts = append(mounts, types.VMMount{ 570 HostPath: filepath.Join(ct.kubeletRootDir, sandbox.Uid, m.podSubpath, m.name), 571 ContainerPath: m.containerPath, 572 }) 573 } 574 575 var volDevs []types.VMVolumeDevice 576 for _, d := range tc.volDevs { 577 // __pods__ is a hint for fake libvirt domain to fix the path so it becomes non-volatile 578 baseDir := filepath.Join(ct.kubeletRootDir, "__pods__", sandbox.Uid, "volumeDevices/kubernetes.io~local-volume") 579 if err := os.MkdirAll(baseDir, 0777); err != nil { 580 t.Fatal(err) 581 } 582 hostPath := filepath.Join(baseDir, d.name) 583 if f, err := os.Create(hostPath); err != nil { 584 t.Fatal(err) 585 } else { 586 if d.size != 0 { 587 if _, err := f.Write(make([]byte, d.size)); err != nil { 588 t.Fatal(err) 589 } 590 } 591 if err := f.Close(); err != nil { 592 t.Fatal(err) 593 } 594 } 595 volDevs = append(volDevs, types.VMVolumeDevice{ 596 DevicePath: d.devicePath, 597 HostPath: hostPath, 598 }) 599 } 600 601 oldLoader := types.GetExternalDataLoader() 602 if tc.objects != nil { 603 fc := fakekube.NewSimpleClientset(tc.objects...) 604 types.SetExternalDataLoader(&defaultExternalDataLoader{kubeClient: fc}) 605 } 606 607 containerID := ct.createContainer(sandbox, mounts, volDevs) 608 609 // startContainer will cause fake Domain 610 // to dump the cloudinit iso content 611 ct.startContainer(containerID) 612 ct.removeContainer(containerID) 613 types.SetExternalDataLoader(oldLoader) 614 gm.Verify(t, gm.NewYamlVerifier(ct.rec.Content())) 615 }) 616 } 617 } 618 619 func TestDomainResourceConstraints(t *testing.T) { 620 cpuQuota := 25000 621 cpuPeriod := 100000 622 cpuShares := 100 623 memoryLimit := 1234567 624 cpuCount := 2 625 626 rec := testutils.NewToplevelRecorder() 627 rec.AddFilter("DefineDomain") 628 ct := newContainerTester(t, rec, nil, nil) 629 defer ct.teardown() 630 sandbox := fakemeta.GetSandboxes(1)[0] 631 sandbox.Annotations = map[string]string{ 632 "VirtletVCPUCount": strconv.Itoa(cpuCount), 633 } 634 ct.setPodSandbox(sandbox) 635 vmConfig := &types.VMConfig{ 636 PodSandboxID: sandbox.Uid, 637 PodName: sandbox.Name, 638 PodNamespace: sandbox.Namespace, 639 Name: fakeContainerName, 640 Image: fakeImageName, 641 Attempt: fakeContainerAttempt, 642 MemoryLimitInBytes: int64(memoryLimit), 643 CPUShares: int64(cpuShares), 644 CPUPeriod: int64(cpuPeriod), 645 CPUQuota: int64(cpuQuota), 646 PodAnnotations: sandbox.Annotations, 647 LogDirectory: fmt.Sprintf("/var/log/pods/%s", sandbox.Uid), 648 LogPath: fmt.Sprintf("%s_%d.log", fakeContainerName, fakeContainerAttempt), 649 } 650 if _, err := ct.virtTool.CreateContainer(vmConfig, "/tmp/fakenetns"); err != nil { 651 t.Fatalf("CreateContainer: %v", err) 652 } 653 654 gm.Verify(t, gm.NewYamlVerifier(ct.rec.Content())) 655 }