github.com/kata-containers/runtime@v0.0.0-20210505125100-04f29832a923/cli/config/configuration-fc.toml.in

# Copyright (c) 2017-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# XXX: WARNING: this file is auto-generated.
# XXX:
# XXX: Source file: "@CONFIG_FC_IN@"
# XXX: Project:
# XXX:   Name: @PROJECT_NAME@
# XXX:   Type: @PROJECT_TYPE@

[hypervisor.firecracker]
path = "@FCPATH@"
kernel = "@KERNELPATH_FC@"
image = "@IMAGEPATH@"

# List of valid annotation names for the hypervisor.
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for "io.katacontainers.config.hypervisor.path".
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @DEFENABLEANNOTATIONS@
enable_annotations = @DEFENABLEANNOTATIONS@

# List of valid annotation values for the hypervisor path.
# Each member of the list is a path pattern as described by glob(3).
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @FCVALIDHYPERVISORPATHS@
valid_hypervisor_paths = @FCVALIDHYPERVISORPATHS@

# Path for the jailer specific to firecracker.
# If the jailer path is not set, kata will launch firecracker
# without a jail. If the jailer path is set, firecracker will be
# launched in a jailed environment created by the jailer.
# This is disabled by default as additional setup is required
# for this feature today.
#jailer_path = "@FCJAILERPATH@"

# List of valid jailer path values for the hypervisor.
# Each member of the list can be a regular expression.
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @FCVALIDJAILERPATHS@
valid_jailer_paths = @FCVALIDJAILERPATHS@
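# Example (illustrative only; the annotation names and path patterns below
# are assumptions, not distribution defaults): to let a caller override the
# firecracker and jailer binaries via annotations while restricting the
# accepted locations, a configuration could look like:
#
#   enable_annotations = ["path", "jailer_path"]
#   valid_hypervisor_paths = ["/usr/bin/firecracker", "/opt/kata/bin/firecracker"]
#   valid_jailer_paths = ["/usr/bin/jailer", "/opt/kata/bin/jailer"]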
# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
# trouble running pre-2.15 glibc.
#
# WARNING: any parameter specified here will take priority over the default
# parameter value of the same name used to start the virtual machine.
# Do not set values here unless you understand the impact of doing so as you
# may stop the virtual machine from booting.
# To see the list of default parameters, enable hypervisor debug, create a
# container and look for 'default-kernel-parameters' log entries.
kernel_params = "@KERNELPARAMS@"

# Default number of vCPUs per SB/VM:
# unspecified or 0                --> will be set to @DEFVCPUS@
# < 0                             --> will be set to the actual number of physical cores
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores
default_vcpus = 1

# Default maximum number of vCPUs per SB/VM:
# unspecified or == 0             --> will be set to the actual number of physical cores or to the
#                                     maximum number of vCPUs supported by KVM if that number is exceeded
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores or to the
#                                     maximum number of vCPUs supported by KVM if that number is exceeded
# WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
# the actual number of physical cores is greater than it.
# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
# can be added to a SB/VM, but the memory footprint will be big. Another example: with
# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable
# unless you know what you are doing.
# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
default_maxvcpus = @DEFMAXVCPUS@

# Bridges can be used to hot plug devices.
# Limitations:
# * Currently only PCI bridges are supported
# * Up to 30 devices per bridge can be hot plugged.
# * Up to 5 PCI bridges can be cold plugged per VM.
#   This limitation could be a bug in the kernel
# Default number of bridges per SB/VM:
# unspecified or 0 --> will be set to @DEFBRIDGES@
# > 1 <= 5         --> will be set to the specified number
# > 5              --> will be set to 5
default_bridges = @DEFBRIDGES@

# Default memory size in MiB for SB/VM.
# If unspecified then it will be set to @DEFMEMSZ@ MiB.
default_memory = @DEFMEMSZ@
#
# Default memory slots per SB/VM.
# If unspecified then it will be set to @DEFMEMSLOTS@.
# This determines how many times memory can be hot-added to the sandbox/VM.
#memory_slots = @DEFMEMSLOTS@

# The size in MiB that will be added to the maximum memory of the hypervisor.
# It is the memory address space for the NVDIMM device.
# If the block storage driver (block_device_driver) is set to "nvdimm",
# memory_offset should be set to the size of the block device.
# Default 0
#memory_offset = 0

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
# root file system is backed by a block device, the block device is passed
# directly to the hypervisor for performance reasons.
# This flag prevents the block device from being passed to the hypervisor;
# 9pfs is used instead to pass the rootfs.
disable_block_device_use = @DEFDISABLEBLOCK@

# Block storage driver to be used for the hypervisor in case the container
# rootfs is backed by a block device. This is virtio-scsi, virtio-blk
# or nvdimm.
block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"

# Specifies whether cache-related options will be set on block devices.
# Default false
#block_device_cache_set = true

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true

# Enable pre-allocation of VM RAM, default false
# Enabling this will result in lower container density
# as all of the memory will be allocated and locked.
# This is useful when you want to reserve all the memory
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true
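# Example (illustrative values only, not recommendations): a sandbox sized
# at 2048 MiB of RAM, using the virtio-blk storage driver with O_DIRECT
# block device caching enabled:
#
#   default_memory = 2048
#   block_device_driver = "virtio-blk"
#   block_device_cache_set = true
#   block_device_cache_direct = true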
# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre-allocation.
#enable_hugepages = true

# Enable vIOMMU, default false
# Enabling this will result in the VM having a vIOMMU device.
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true

# Enable swap of vm memory. Default false.
# The behaviour is undefined if mem_prealloc is also set to true
#enable_swap = true

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available. This extra output is added
# to the proxy logs, but only when proxy debug is also enabled.
#
# Default false
#enable_debug = true

# Disable the customizations done in the runtime when it detects
# that it is running on top of a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@

# If true and vsocks are supported, use vsocks to communicate directly
# with the agent (no proxy is started).
# Default true
use_vsock = true

# VFIO devices are hotplugged on a bridge by default.
# Enable hotplugging on the root bus. This may be required for devices with
# a large PCI bar, as this is a current limitation with hotplugging on
# a bridge. This value is valid for the "pc" machine type.
# Default false
#hotplug_vfio_on_root_bus = true

#
# Default entropy source.
# The path to a host source of entropy (including a real hardware RNG).
# /dev/urandom and /dev/random are the two main options.
# Be aware that /dev/random is a blocking source of entropy. If the host
# runs out of entropy, the VM's boot time will increase, leading to
# startup timeouts.
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
#
# You can create a rootfs with hooks by customizing the osbuilder scripts:
# https://github.com/kata-containers/osbuilder
#
# Hooks must be stored in a subdirectory of guest_hook_path according to their
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
# The agent will scan these directories for executable files and add them, in
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
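# Example (hypothetical file names, for illustration only): with the default
# guest_hook_path shown above, the agent would pick up executables laid out
# in the guest rootfs like:
#
#   /usr/share/oci/hooks/prestart/10-setup.sh
#   /usr/share/oci/hooks/poststop/90-cleanup.sh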
[factory]
# VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and
# agent memory by mapping it readonly. It helps speed up new container
# creation and saves a lot of memory if there are many kata containers running
# on the same host.
#
# When disabled, new VMs are created from scratch.
#
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true

[shim.@PROJECT_TYPE@]
path = "@SHIMPATH@"

# If enabled, shim messages will be sent to the system log
# (default: disabled)
#enable_debug = true

# If enabled, the shim will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
#
# Note: By default, the shim runs in a separate network namespace. Therefore,
# to allow it to send trace details to the Jaeger agent running on the host,
# it is necessary to set 'disable_new_netns=true' so that it runs in the host
# network namespace.
#
# (default: disabled)
#enable_tracing = true

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true

# Enable agent tracing.
#
# If enabled, the default trace mode is "dynamic" and the
# default trace type is "isolated". The trace mode and type are set
# explicitly with the `trace_type=` and `trace_mode=` options.
#
# Notes:
#
# - Tracing is ONLY enabled when `enable_tracing` is set: explicitly
#   setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing`
#   will NOT activate agent tracing.
#
# - See https://github.com/kata-containers/agent/blob/master/TRACING.md for
#   full details.
#
# (default: disabled)
#enable_tracing = true
#
#trace_mode = "dynamic"
#trace_type = "isolated"

# Comma-separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
# The following example can be used to load two kernel modules with parameters:
#  - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
# The first word is considered the module name and the rest its parameters.
# The container will not be started when:
#  * A kernel module is specified and the modprobe command is not installed in the guest
#    or it fails to load the module.
#  * The module is not available in the guest or it doesn't meet the guest kernel
#    requirements, like architecture and version.
#
kernel_modules=[]
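# Example (illustrative only): enabling agent tracing with the default trace
# mode and type from the notes above spelled out explicitly:
#
#   enable_tracing = true
#   trace_mode = "dynamic"
#   trace_type = "isolated"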
[netmon]
# If enabled, the network monitoring process gets started when the
# sandbox is created. This allows for the detection of additional network
# interfaces being added to the existing network namespace, after the
# sandbox has been created.
# (default: disabled)
#enable_netmon = true

# Specify the path to the netmon binary.
path = "@NETMONPATH@"

# If enabled, netmon messages will be sent to the system log
# (default: disabled)
#enable_debug = true

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
#
# Internetworking model
# Determines how the VM should be connected to the
# container network interface
# Options:
#
#   - macvtap
#     Used when the Container network interface can be bridged using
#     macvtap.
#
#   - none
#     Used when the network is customized. Only creates a tap device.
#     No veth pair.
#
#   - tcfilter
#     Uses tc filter rules to redirect traffic from the network interface
#     provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_FC@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impact on your host. It should only be used when you know what you're doing.
# `disable_new_netns` conflicts with `enable_netmon`.
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`.
# (default: false)
#disable_new_netns = true

# If enabled, the runtime will add all the kata processes to one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@

# List of enabled experimental features, format: ["a", "b"].
# Experimental features are features not stable enough for production;
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@

# If enabled, containers are allowed to join the pid namespace of the agent
# when the env variable KATA_AGENT_PIDNS is set for a container.
# Use this with caution and only when required, as this option allows the container
# to access the agent process. It is recommended to enable this option
# only in debug scenarios and with containers with lowered privileges.
#enable_agent_pidns = true
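# Example (illustrative only): running the shim and hypervisor in the host
# network namespace, which, per the `disable_new_netns` notes above, only
# works together with the "none" internetworking model:
#
#   internetworking_model = "none"
#   disable_new_netns = true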