# Copyright (c) 2017-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# XXX: WARNING: this file is auto-generated.
# XXX:
# XXX: Source file: "@CONFIG_QEMU_VIRTIOFS_IN@"
# XXX: Project:
# XXX:   Name: @PROJECT_NAME@
# XXX:   Type: @PROJECT_TYPE@

[hypervisor.qemu]
path = "@QEMUVIRTIOFSPATH@"
kernel = "@KERNELVIRTIOFSPATH@"
image = "@IMAGEPATH@"
machine_type = "@MACHINETYPE@"

# List of valid annotation names for the hypervisor.
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for "io.katacontainers.config.hypervisor.path".
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @DEFENABLEANNOTATIONS@
enable_annotations = @DEFENABLEANNOTATIONS@

# List of valid annotation values for the hypervisor path.
# Each member of the list is a path pattern as described by glob(3).
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @QEMUVALIDHYPERVISORPATHS@
valid_hypervisor_paths = @QEMUVALIDHYPERVISORPATHS@
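# As an illustration only (the values below are hypothetical, not a
# recommendation): with the settings
#
#   enable_annotations = ["path", "kernel_params"]
#   valid_hypervisor_paths = ["/usr/bin/qemu-system-*"]
#
# a pod annotated with
#   io.katacontainers.config.hypervisor.path: "/usr/bin/qemu-system-x86_64"
# would be accepted, while any hypervisor path not matching the glob
# pattern would be rejected.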
# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
# trouble running pre-2.15 glibc.
#
# WARNING: any parameter specified here will take priority over the default
# parameter value of the same name used to start the virtual machine.
# Do not set values here unless you understand the impact of doing so as you
# may stop the virtual machine from booting.
# To see the list of default parameters, enable hypervisor debug, create a
# container and look for 'default-kernel-parameters' log entries.
kernel_params = "@KERNELPARAMS@"

# Path to the firmware.
# If you want qemu to use the default firmware, leave this option empty.
firmware = "@FIRMWAREPATH@"

# Machine accelerators
# Comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators="@MACHINEACCELERATORS@"

# CPU features
# Comma-separated list of cpu features to pass to the cpu.
# For example, `cpu_features = "pmu=off,vmx=off"`
cpu_features="@CPUFEATURES@"

# Default number of vCPUs per SB/VM:
# unspecified or 0                --> will be set to @DEFVCPUS@
# < 0                             --> will be set to the actual number of physical cores
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores
default_vcpus = 1

# Default maximum number of vCPUs per SB/VM:
# unspecified or == 0             --> will be set to the actual number of physical cores or
#                                     to the maximum number of vCPUs supported by KVM if
#                                     that number is exceeded
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores or
#                                     to the maximum number of vCPUs supported by KVM if
#                                     that number is exceeded
# WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
# the actual number of physical cores is greater than it.
# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
# can be added to a SB/VM, but the memory footprint will be big. Another example: with
# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
# unless you know what you are doing.
# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
default_maxvcpus = @DEFMAXVCPUS@

# Bridges can be used to hot plug devices.
# Limitations:
# * Currently only PCI bridges are supported.
# * Up to 30 devices per bridge can be hot plugged.
# * Up to 5 PCI bridges can be cold plugged per VM.
#   This limitation could be a bug in qemu or in the kernel.
# Default number of bridges per SB/VM:
# unspecified or 0 --> will be set to @DEFBRIDGES@
# > 1 <= 5         --> will be set to the specified number
# > 5              --> will be set to 5
default_bridges = @DEFBRIDGES@

# Default memory size in MiB for SB/VM.
# If unspecified then it will be set to @DEFMEMSZ@ MiB.
default_memory = @DEFMEMSZ@
#
# Default memory slots per SB/VM.
# If unspecified then it will be set to @DEFMEMSLOTS@.
# This determines how many times memory can be hot-added to the sandbox/VM.
#memory_slots = @DEFMEMSLOTS@

# The size in MiB that will be added to the maximum memory of the hypervisor.
# It is the memory address space for the NVDIMM device.
# If the block storage driver (block_device_driver) is set to "nvdimm",
# memory_offset should be set to the size of the block device.
# Default 0
#memory_offset = 0

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
# root file system is backed by a block device, the block device is passed
# directly to the hypervisor for performance reasons.
# This flag prevents the block device from being passed to the hypervisor;
# 9pfs is used instead to pass the rootfs.
disable_block_device_use = @DEFDISABLEBLOCK@

# Shared file system type:
#   - virtio-fs (default)
#   - virtio-9p
shared_fs = "@DEFSHAREDFS_QEMU_VIRTIOFS@"

# Path to vhost-user-fs daemon.
virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"

# List of valid annotation values for the virtiofs daemon path.
# Each member of the list is a path pattern as described by glob(3).
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @DEFVALIDVIRTIOFSDAEMONPATHS@
valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@

# Default size of DAX cache in MiB
virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@

# Extra args for virtiofsd daemon
#
# Format example:
#   ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
#
# see `virtiofsd -h` for possible options.
virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
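# Illustrative example only (whether this flag is available depends on the
# virtiofsd version in use; check `virtiofsd -h` before setting it):
#
#   virtio_fs_extra_args = ["--thread-pool-size=16"]
#
# would ask virtiofsd to use a 16-thread request pool.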
# Cache mode:
#
#  - none
#    Metadata, data, and pathname lookup are not cached in guest. They are
#    always fetched from host and any changes are immediately pushed to host.
#
#  - auto
#    Metadata and pathname lookup cache expires after a configured amount of
#    time (default is 1 second). Data is cached while the file is open (close
#    to open consistency).
#
#  - always
#    Metadata, data, and pathname lookup are cached in guest and never expire.
virtio_fs_cache = "@DEFVIRTIOFSCACHE@"

# Block storage driver to be used for the hypervisor in case the container
# rootfs is backed by a block device. This is virtio-scsi, virtio-blk
# or nvdimm.
block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@"

# Specifies whether cache-related options will be set for block devices.
# Default false
#block_device_cache_set = true

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently only implemented
# for SCSI.
#
enable_iothreads = @DEFENABLEIOTHREADS@

# Enable pre allocation of VM RAM, default false
# Enabling this will result in lower container density
# as all of the memory will be allocated and locked.
# This is useful when you want to reserve all the memory
# upfront or in the cases where you want memory latencies
# to be very predictable.
# Default false
#enable_mem_prealloc = true

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation.
#enable_hugepages = true

# Enable vhost-user storage device, default false
# Enabling this will result in a Linux reserved block major number from the
# range 240-254 being chosen to represent vhost-user devices.
enable_vhost_user_store = @DEFENABLEVHOSTUSERSTORE@

# The base directory specifically used for vhost-user devices.
# Its sub-path "block" is used for block devices; "block/sockets" is
# where we expect vhost-user sockets to live; "block/devices" is where
# simulated block device nodes for vhost-user devices live.
vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"

# Enable vIOMMU, default false
# Enabling this will result in the VM having a vIOMMU device.
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true

# Enable IOMMU_PLATFORM, default false
# Enabling this will result in the VM device having iommu_platform=on set.
#enable_iommu_platform = true

# List of valid annotation values for the vhost-user store path.
# Each member of the list is a path pattern as described by glob(3).
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @DEFVALIDVHOSTUSERSTOREPATHS@
valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@

# Enable file based guest memory support. The default is an empty string which
# will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder.
# This option will be ignored if VM templating is enabled.
#file_mem_backend = "@DEFFILEMEMBACKEND@"
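# Illustrative example only (assumes a hugetlbfs filesystem is mounted at
# /dev/hugepages on the host; adjust the path to your setup):
#
#   file_mem_backend = "/dev/hugepages"
#
# would back guest RAM with files created in that directory instead of
# anonymous memory.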
# List of valid annotation values for the file_mem_backend path.
# Each member of the list is a path pattern as described by glob(3).
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @DEFVALIDFILEMEMBACKENDS@
valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@

# Enable swap of vm memory. Default false.
# The behaviour is undefined if mem_prealloc is also set to true.
#enable_swap = true

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available. This extra output is added
# to the proxy logs, but only when proxy debug is also enabled.
# Note: Debug output will only be put into qemu.log in the event
# of a virtual hardware issue, otherwise it will be empty.
#
# Default false
#enable_debug = true

# Disable the customizations done in the runtime when it detects
# that it is running on top of a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@

# If true and vsocks are supported, use vsocks to communicate directly
# with the agent and no proxy is started, otherwise use unix
# sockets and start a proxy to communicate with the agent.
# Default false
#use_vsock = true

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
# Default false
#disable_image_nvdimm = true

# VFIO devices are hotplugged on a bridge by default.
# Enable hotplugging on root bus. This may be required for devices with
# a large PCI bar, as this is a current limitation with hotplugging on
# a bridge. This value is valid for "pc" machine type.
# Default false
#hotplug_vfio_on_root_bus = true

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs in ring0) for network I/O performance.
#disable_vhost_net = true

#
# Default entropy source.
# The path to a host source of entropy (including a real hardware RNG).
# /dev/urandom and /dev/random are two main options.
# Be aware that /dev/random is a blocking source of entropy. If the host
# runs out of entropy, the VM's boot time will increase, leading to
# startup timeouts.
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
#
# You can create a rootfs with hooks by customizing the osbuilder scripts:
# https://github.com/kata-containers/osbuilder
#
# Hooks must be stored in a subdirectory of guest_hook_path according to their
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
# The agent will scan these directories for executable files and add them, in
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official
# documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for
# hooks, but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
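# Purely as an illustration (the file names are hypothetical), a guest rootfs
# using the default path above could provide hooks laid out like this:
#
#   /usr/share/oci/hooks/prestart/10-setup-network.sh
#   /usr/share/oci/hooks/poststop/90-cleanup.sh
#
# Both files must be executable; they are run in lexicographical order at the
# corresponding lifecycle stage of the guest container.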
[factory]
# VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and
# agent memory by mapping it readonly. It helps speed up new container
# creation and saves a lot of memory if there are many kata containers running
# on the same host.
#
# When disabled, new VMs are created from scratch.
#
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true

# Specifies the path of the template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0 --> VMCache is disabled
# > 0                 --> will be set to the specified number
#
# VMCache is a function that creates VMs as caches before they are used.
# It helps speed up new container creation.
# The function consists of a server and some clients communicating
# through a Unix socket. The protocol is gRPC in protocols/cache/cache.proto.
# The VMCache server will create some VMs and cache them by factory cache.
# It will convert a VM to gRPC format and transport it when it gets a
# request from a client.
# Factory grpccache is the VMCache client. It will request a gRPC-format
# VM and convert it back to a VM. If the VMCache function is enabled,
# kata-runtime will request a VM from factory grpccache when it creates
# a new sandbox.
#
# Default 0
#vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"

[proxy.@PROJECT_TYPE@]
path = "@PROXYPATH@"

# If enabled, proxy messages will be sent to the system log
# (default: disabled)
#enable_debug = true

[shim.@PROJECT_TYPE@]
path = "@SHIMPATH@"

# If enabled, shim messages will be sent to the system log
# (default: disabled)
#enable_debug = true

# If enabled, the shim will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
#
# Note: By default, the shim runs in a separate network namespace. Therefore,
# to allow it to send trace details to the Jaeger agent running on the host,
# it is necessary to set 'disable_new_netns=true' so that it runs in the host
# network namespace.
#
# (default: disabled)
#enable_tracing = true

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true

# Enable agent tracing.
#
# If enabled, the default trace mode is "dynamic" and the
# default trace type is "isolated". The trace mode and type are set
# explicitly with the `trace_type=` and `trace_mode=` options.
#
# Notes:
#
# - Tracing is ONLY enabled when `enable_tracing` is set: explicitly
#   setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing`
#   will NOT activate agent tracing.
#
# - See https://github.com/kata-containers/agent/blob/master/TRACING.md for
#   full details.
#
# (default: disabled)
#enable_tracing = true
#
#trace_mode = "dynamic"
#trace_type = "isolated"

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
# The following example can be used to load two kernel modules with parameters:
#  - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
# The first word is considered the module name and the rest its parameters.
# The container will not be started when:
#  * A kernel module is specified and the modprobe command is not installed in the guest
#    or it fails loading the module.
#  * The module is not available in the guest or it doesn't meet the guest kernel
#    requirements, like architecture and version.
#
kernel_modules=[]


[netmon]
# If enabled, the network monitoring process gets started when the
# sandbox is created. This allows for the detection of additional network
# interfaces being added to the existing network namespace after the
# sandbox has been created.
# (default: disabled)
#enable_netmon = true

# Specify the path to the netmon binary.
path = "@NETMONPATH@"

# If enabled, netmon messages will be sent to the system log
# (default: disabled)
#enable_debug = true

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
#
# Internetworking model
# Determines how the VM should be connected to the
# container network interface.
# Options:
#
#   - bridged (Deprecated)
#     Uses a linux bridge to interconnect the container interface to
#     the VM. Works for most cases except macvlan and ipvlan.
#     ***NOTE: This feature has been deprecated with plans to remove this
#     feature in the future. Please use other network models listed below.
#
#   - macvtap
#     Used when the Container network interface can be bridged using
#     macvtap.
#
#   - none
#     Used when the network is customized. Only creates a tap device.
#     No veth pair.
#
#   - tcfilter
#     Uses tc filter rules to redirect traffic from the network interface
#     provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_QEMU@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest.
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
# `disable_new_netns` conflicts with `enable_netmon`.
# `disable_new_netns` conflicts with `internetworking_model=bridged` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`.
# (default: false)
#disable_new_netns = true
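# Illustrative combination only (shown as an example, not a recommendation):
# to run the shim and hypervisor in the host network namespace, the two
# options have to be set together, since `disable_new_netns` only works with
# the "none" networking model:
#
#   internetworking_model = "none"
#   disable_new_netns = true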
# If enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production;
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@