github.com/kata-containers/runtime@v0.0.0-20210505125100-04f29832a923/cli/config/configuration-qemu.toml.in (about)

     1  # Copyright (c) 2017-2019 Intel Corporation
     2  #
     3  # SPDX-License-Identifier: Apache-2.0
     4  #
     5  
     6  # XXX: WARNING: this file is auto-generated.
     7  # XXX:
     8  # XXX: Source file: "@CONFIG_QEMU_IN@"
     9  # XXX: Project:
    10  # XXX:   Name: @PROJECT_NAME@
    11  # XXX:   Type: @PROJECT_TYPE@
    12  
    13  [hypervisor.qemu]
    14  path = "@QEMUPATH@"
    15  kernel = "@KERNELPATH@"
    16  initrd = "@INITRDPATH@"
    17  image = "@IMAGEPATH@"
    18  machine_type = "@MACHINETYPE@"
    19  
    20  # List of valid annotation names for the hypervisor
    21  # Each member of the list is a regular expression, which is the base name
     22  # of the annotation, e.g. "path" for "io.katacontainers.config.hypervisor.path"
    23  # The default if not set is empty (all annotations rejected.)
    24  # Your distribution recommends: @DEFENABLEANNOTATIONS@
    25  enable_annotations = @DEFENABLEANNOTATIONS@
    26  
    27  # List of valid annotation values for the hypervisor path
    28  # Each member of the list is a path pattern as described by glob(3).
    29  # The default if not set is empty (all annotations rejected.)
    30  # Your distribution recommends: @QEMUVALIDHYPERVISORPATHS@
    31  valid_hypervisor_paths = @QEMUVALIDHYPERVISORPATHS@
    32  
    33  # Optional space-separated list of options to pass to the guest kernel.
    34  # For example, use `kernel_params = "vsyscall=emulate"` if you are having
    35  # trouble running pre-2.15 glibc.
    36  #
    37  # WARNING: - any parameter specified here will take priority over the default
    38  # parameter value of the same name used to start the virtual machine.
    39  # Do not set values here unless you understand the impact of doing so as you
    40  # may stop the virtual machine from booting.
    41  # To see the list of default parameters, enable hypervisor debug, create a
    42  # container and look for 'default-kernel-parameters' log entries.
    43  kernel_params = "@KERNELPARAMS@"
    44  
    45  # Path to the firmware.
     46  # If you want qemu to use the default firmware, leave this option empty
    47  firmware = "@FIRMWAREPATH@"
    48  
    49  # Machine accelerators
    50  # comma-separated list of machine accelerators to pass to the hypervisor.
    51  # For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
    52  machine_accelerators="@MACHINEACCELERATORS@"
    53  
    54  # CPU features
    55  # comma-separated list of cpu features to pass to the cpu
     56  # For example, `cpu_features = "pmu=off,vmx=off"`
    57  cpu_features="@CPUFEATURES@"
    58  
    59  # Default number of vCPUs per SB/VM:
    60  # unspecified or 0                --> will be set to @DEFVCPUS@
    61  # < 0                             --> will be set to the actual number of physical cores
    62  # > 0 <= number of physical cores --> will be set to the specified number
    63  # > number of physical cores      --> will be set to the actual number of physical cores
    64  default_vcpus = 1
    65  
    66  # Default maximum number of vCPUs per SB/VM:
    67  # unspecified or == 0             --> will be set to the actual number of physical cores or to the maximum number
    68  #                                     of vCPUs supported by KVM if that number is exceeded
    69  # > 0 <= number of physical cores --> will be set to the specified number
    70  # > number of physical cores      --> will be set to the actual number of physical cores or to the maximum number
    71  #                                     of vCPUs supported by KVM if that number is exceeded
     72  # WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
    73  # the actual number of physical cores is greater than it.
     74  # WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
     75  # hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
     76  # can be added to a SB/VM, but the memory footprint will be big. Another example, with
     77  # `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
     78  # vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
     79  # unless you know what you are doing.
    80  # NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
    81  default_maxvcpus = @DEFMAXVCPUS@
    82  
    83  # Bridges can be used to hot plug devices.
    84  # Limitations:
    85  # * Currently only pci bridges are supported
     86  # * Up to 30 devices per bridge can be hot plugged.
     87  # * Up to 5 PCI bridges can be cold plugged per VM.
    88  #   This limitation could be a bug in qemu or in the kernel
    89  # Default number of bridges per SB/VM:
    90  # unspecified or 0   --> will be set to @DEFBRIDGES@
    91  # > 1 <= 5           --> will be set to the specified number
    92  # > 5                --> will be set to 5
    93  default_bridges = @DEFBRIDGES@
    94  
    95  # Default memory size in MiB for SB/VM.
     96  # If unspecified then it will be set to @DEFMEMSZ@ MiB.
    97  default_memory = @DEFMEMSZ@
    98  #
    99  # Default memory slots per SB/VM.
    100  # If unspecified then it will be set to @DEFMEMSLOTS@.
    101  # This will determine the number of times that memory can be hot-added to the sandbox/VM.
   102  #memory_slots = @DEFMEMSLOTS@
   103  
    104  # This size in MiB will be added to the hypervisor's maximum memory.
    105  # It is the memory address space for the NVDIMM device.
   106  # If set block storage driver (block_device_driver) to "nvdimm",
   107  # should set memory_offset to the size of block device.
   108  # Default 0
   109  #memory_offset = 0
   110  
   111  # Specifies virtio-mem will be enabled or not.
   112  # Please note that this option should be used with the command
   113  # "echo 1 > /proc/sys/vm/overcommit_memory".
   114  # Default false
   115  #enable_virtio_mem = true
   116  
   117  # Disable block device from being used for a container's rootfs.
   118  # In case of a storage driver like devicemapper where a container's
   119  # root file system is backed by a block device, the block device is passed
   120  # directly to the hypervisor for performance reasons.
   121  # This flag prevents the block device from being passed to the hypervisor,
   122  # 9pfs is used instead to pass the rootfs.
   123  disable_block_device_use = @DEFDISABLEBLOCK@
   124  
   125  # Shared file system type:
   126  #   - virtio-9p (default)
   127  #   - virtio-fs
   128  shared_fs = "@DEFSHAREDFS@"
   129  
   130  # Path to vhost-user-fs daemon.
   131  virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
   132  
   133  # List of valid annotation values for the virtiofs daemon path
   134  # Each member of the list is a path pattern as described by glob(3).
   135  # The default if not set is empty (all annotations rejected.)
   136  # Your distribution recommends: @DEFVALIDVIRTIOFSDAEMONPATHS@
   137  valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@
   138  
   139  # Default size of DAX cache in MiB
   140  virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
   141  
   142  # Extra args for virtiofsd daemon
   143  #
   144  # Format example:
   145  #   ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
   146  #
   147  # see `virtiofsd -h` for possible options.
   148  virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
   149  
   150  # Cache mode:
   151  #
   152  #  - none
   153  #    Metadata, data, and pathname lookup are not cached in guest. They are
   154  #    always fetched from host and any changes are immediately pushed to host.
   155  #
   156  #  - auto
   157  #    Metadata and pathname lookup cache expires after a configured amount of
   158  #    time (default is 1 second). Data is cached while the file is open (close
   159  #    to open consistency).
   160  #
   161  #  - always
   162  #    Metadata, data, and pathname lookup are cached in guest and never expire.
   163  virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
   164  
   165  # Block storage driver to be used for the hypervisor in case the container
   166  # rootfs is backed by a block device. This is virtio-scsi, virtio-blk
   167  # or nvdimm.
   168  block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@"
   169  
   170  # Specifies cache-related options will be set to block devices or not.
   171  # Default false
   172  #block_device_cache_set = true
   173  
   174  # Specifies cache-related options for block devices.
   175  # Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
   176  # Default false
   177  #block_device_cache_direct = true
   178  
   179  # Specifies cache-related options for block devices.
   180  # Denotes whether flush requests for the device are ignored.
   181  # Default false
   182  #block_device_cache_noflush = true
   183  
   184  # Enable iothreads (data-plane) to be used. This causes IO to be
   185  # handled in a separate IO thread. This is currently only implemented
   186  # for SCSI.
   187  #
   188  enable_iothreads = @DEFENABLEIOTHREADS@
   189  
   190  # Enable pre allocation of VM RAM, default false
   191  # Enabling this will result in lower container density
   192  # as all of the memory will be allocated and locked
   193  # This is useful when you want to reserve all the memory
   194  # upfront or in the cases where you want memory latencies
   195  # to be very predictable
   196  # Default false
   197  #enable_mem_prealloc = true
   198  
   199  # Enable huge pages for VM RAM, default false
   200  # Enabling this will result in the VM memory
   201  # being allocated using huge pages.
   202  # This is useful when you want to use vhost-user network
   203  # stacks within the container. This will automatically
   204  # result in memory pre allocation
   205  #enable_hugepages = true
   206  
   207  # Enable vhost-user storage device, default false
   208  # Enabling this will result in some Linux reserved block type
   209  # major range 240-254 being chosen to represent vhost-user devices.
   210  enable_vhost_user_store = @DEFENABLEVHOSTUSERSTORE@
   211  
   212  # The base directory specifically used for vhost-user devices.
   213  # Its sub-path "block" is used for block devices; "block/sockets" is
   214  # where we expect vhost-user sockets to live; "block/devices" is where
   215  # simulated block device nodes for vhost-user devices to live.
   216  vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
   217  
   218  # Enable vIOMMU, default false
   219  # Enabling this will result in the VM having a vIOMMU device
   220  # This will also add the following options to the kernel's
   221  # command line: intel_iommu=on,iommu=pt
   222  #enable_iommu = true
   223  
   224  # Enable IOMMU_PLATFORM, default false
   225  # Enabling this will result in the VM device having iommu_platform=on set
   226  #enable_iommu_platform = true
   227  
   228  # List of valid annotation values for the vhost user store path
   229  # Each member of the list is a path pattern as described by glob(3).
   230  # The default if not set is empty (all annotations rejected.)
   231  # Your distribution recommends: @DEFVALIDVHOSTUSERSTOREPATHS@
   232  valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@
   233  
   234  # Enable file based guest memory support. The default is an empty string which
   235  # will disable this feature. In the case of virtio-fs, this is enabled
   236  # automatically and '/dev/shm' is used as the backing folder.
   237  # This option will be ignored if VM templating is enabled.
   238  #file_mem_backend = "@DEFFILEMEMBACKEND@"
   239  
   240  # List of valid annotation values for the file_mem_backend path
   241  # Each member of the list is a path pattern as described by glob(3).
   242  # The default if not set is empty (all annotations rejected.)
   243  # Your distribution recommends: @DEFVALIDFILEMEMBACKENDS@
   244  valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@
   245  
   246  # -pflash can add image file to VM. The arguments of it should be in format
   247  # of ["/path/to/flash0.img", "/path/to/flash1.img"]
   248  pflashes = []
   249  
   250  # Enable swap of vm memory. Default false.
   251  # The behaviour is undefined if mem_prealloc is also set to true
   252  #enable_swap = true
   253  
   254  # This option changes the default hypervisor and kernel parameters
   255  # to enable debug output where available. This extra output is added
   256  # to the proxy logs, but only when proxy debug is also enabled.
   257  # Note: Debug output will only be put into qemu.log in the event
   258  # of a virtual hardware issue, otherwise it will be empty.
   259  #
   260  # Default false
   261  #enable_debug = true
   262  
   263  # Disable the customizations done in the runtime when it detects
   264  # that it is running on top a VMM. This will result in the runtime
   265  # behaving as it would when running on bare metal.
   266  #
   267  #disable_nesting_checks = true
   268  
   269  # This is the msize used for 9p shares. It is the number of bytes
   270  # used for 9p packet payload.
   271  #msize_9p = @DEFMSIZE9P@
   272  
   273  # If true and vsocks are supported, use vsocks to communicate directly
   274  # with the agent and no proxy is started, otherwise use unix
   275  # sockets and start a proxy to communicate with the agent.
   276  # Default false
   277  #use_vsock = true
   278  
   279  # If false and nvdimm is supported, use nvdimm device to plug guest image.
   280  # Otherwise virtio-block device is used.
   281  # Default is false
   282  #disable_image_nvdimm = true
   283  
   284  # VFIO devices are hotplugged on a bridge by default.
   285  # Enable hotplugging on root bus. This may be required for devices with
   286  # a large PCI bar, as this is a current limitation with hotplugging on
   287  # a bridge. This value is valid for "pc" machine type.
   288  # Default false
   289  #hotplug_vfio_on_root_bus = true
   290  
   291  # Before hot plugging a PCIe device, you need to add a pcie_root_port device.
   292  # Use this parameter when using some large PCI bar devices, such as Nvidia GPU
   293  # The value means the number of pcie_root_port
   294  # This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
   295  # Default 0
   296  #pcie_root_port = 2
   297  
   298  # If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
   299  # security (vhost-net runs ring0) for network I/O performance.
   300  #disable_vhost_net = true
   301  
   302  #
   303  # Default entropy source.
   304  # The path to a host source of entropy (including a real hardware RNG)
   305  # /dev/urandom and /dev/random are two main options.
   306  # Be aware that /dev/random is a blocking source of entropy.  If the host
   307  # runs out of entropy, the VMs boot time will increase leading to get startup
   308  # timeouts.
   309  # The source of entropy /dev/urandom is non-blocking and provides a
   310  # generally acceptable source of entropy. It should work well for pretty much
   311  # all practical purposes.
   312  #entropy_source= "@DEFENTROPYSOURCE@"
   313  
   314  # Path to OCI hook binaries in the *guest rootfs*.
   315  # This does not affect host-side hooks which must instead be added to
   316  # the OCI spec passed to the runtime.
   317  #
   318  # You can create a rootfs with hooks by customizing the osbuilder scripts:
   319  # https://github.com/kata-containers/osbuilder
   320  #
   321  # Hooks must be stored in a subdirectory of guest_hook_path according to their
   322  # hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
   323  # The agent will scan these directories for executable files and add them, in
   324  # lexicographical order, to the lifecycle of the guest container.
   325  # Hooks are executed in the runtime namespace of the guest. See the official documentation:
   326  # https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
    327  # Warnings will be logged if any error is encountered while scanning for hooks,
   328  # but it will not abort container execution.
   329  #guest_hook_path = "/usr/share/oci/hooks"
   330  
   331  [factory]
   332  # VM templating support. Once enabled, new VMs are created from template
   333  # using vm cloning. They will share the same initial kernel, initramfs and
   334  # agent memory by mapping it readonly. It helps speeding up new container
   335  # creation and saves a lot of memory if there are many kata containers running
   336  # on the same host.
   337  #
   338  # When disabled, new VMs are created from scratch.
   339  #
   340  # Note: Requires "initrd=" to be set ("image=" is not supported).
   341  #
   342  # Default false
   343  #enable_template = true
   344  
   345  # Specifies the path of template.
   346  #
   347  # Default "/run/vc/vm/template"
   348  #template_path = "/run/vc/vm/template"
   349  
   350  # The number of caches of VMCache:
   351  # unspecified or == 0   --> VMCache is disabled
   352  # > 0                   --> will be set to the specified number
   353  #
   354  # VMCache is a function that creates VMs as caches before using it.
   355  # It helps speed up new container creation.
   356  # The function consists of a server and some clients communicating
   357  # through Unix socket.  The protocol is gRPC in protocols/cache/cache.proto.
   358  # The VMCache server will create some VMs and cache them by factory cache.
    359  # It will convert the VM to gRPC format and transport it when it gets
    360  # a request from clients.
   361  # Factory grpccache is the VMCache client.  It will request gRPC format
   362  # VM and convert it back to a VM.  If VMCache function is enabled,
   363  # kata-runtime will request VM from factory grpccache when it creates
   364  # a new sandbox.
   365  #
   366  # Default 0
   367  #vm_cache_number = 0
   368  
   369  # Specify the address of the Unix socket that is used by VMCache.
   370  #
   371  # Default /var/run/kata-containers/cache.sock
   372  #vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
   373  
   374  [proxy.@PROJECT_TYPE@]
   375  path = "@PROXYPATH@"
   376  
   377  # If enabled, proxy messages will be sent to the system log
   378  # (default: disabled)
   379  #enable_debug = true
   380  
   381  [shim.@PROJECT_TYPE@]
   382  path = "@SHIMPATH@"
   383  
   384  # If enabled, shim messages will be sent to the system log
   385  # (default: disabled)
   386  #enable_debug = true
   387  
   388  # If enabled, the shim will create opentracing.io traces and spans.
   389  # (See https://www.jaegertracing.io/docs/getting-started).
   390  #
   391  # Note: By default, the shim runs in a separate network namespace. Therefore,
   392  # to allow it to send trace details to the Jaeger agent running on the host,
   393  # it is necessary to set 'disable_new_netns=true' so that it runs in the host
   394  # network namespace.
   395  #
   396  # (default: disabled)
   397  #enable_tracing = true
   398  
   399  [agent.@PROJECT_TYPE@]
   400  # If enabled, make the agent display debug-level messages.
   401  # (default: disabled)
   402  #enable_debug = true
   403  
   404  # Enable agent tracing.
   405  #
   406  # If enabled, the default trace mode is "dynamic" and the
   407  # default trace type is "isolated". The trace mode and type are set
    408  # explicitly with the `trace_type=` and `trace_mode=` options.
   409  #
   410  # Notes:
   411  #
   412  # - Tracing is ONLY enabled when `enable_tracing` is set: explicitly
   413  #   setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing`
   414  #   will NOT activate agent tracing.
   415  #
   416  # - See https://github.com/kata-containers/agent/blob/master/TRACING.md for
   417  #   full details.
   418  #
   419  # (default: disabled)
   420  #enable_tracing = true
   421  #
   422  #trace_mode = "dynamic"
   423  #trace_type = "isolated"
   424  
   425  # Comma separated list of kernel modules and their parameters.
   426  # These modules will be loaded in the guest kernel using modprobe(8).
   427  # The following example can be used to load two kernel modules with parameters
   428  #  - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
   429  # The first word is considered as the module name and the rest as its parameters.
   430  # Container will not be started when:
   431  #  * A kernel module is specified and the modprobe command is not installed in the guest
   432  #    or it fails loading the module.
    433  #  * The module is not available in the guest or it doesn't meet the guest kernel
   434  #    requirements, like architecture and version.
   435  #
   436  kernel_modules=[]
   437  
   438  
   439  [netmon]
   440  # If enabled, the network monitoring process gets started when the
   441  # sandbox is created. This allows for the detection of some additional
   442  # network being added to the existing network namespace, after the
   443  # sandbox has been created.
   444  # (default: disabled)
   445  #enable_netmon = true
   446  
   447  # Specify the path to the netmon binary.
   448  path = "@NETMONPATH@"
   449  
   450  # If enabled, netmon messages will be sent to the system log
   451  # (default: disabled)
   452  #enable_debug = true
   453  
   454  [runtime]
   455  # If enabled, the runtime will log additional debug messages to the
   456  # system log
   457  # (default: disabled)
   458  #enable_debug = true
   459  #
   460  # Internetworking model
   461  # Determines how the VM should be connected to the
    462  # container network interface
   463  # Options:
   464  #
   465  #   - macvtap
   466  #     Used when the Container network interface can be bridged using
   467  #     macvtap.
   468  #
   469  #   - none
    470  #     Used when customizing the network. Only creates a tap device. No veth pair.
   471  #
   472  #   - tcfilter
   473  #     Uses tc filter rules to redirect traffic from the network interface
   474  #     provided by plugin to a tap interface connected to the VM.
   475  #
   476  internetworking_model="@DEFNETWORKMODEL_QEMU@"
   477  
   478  # disable guest seccomp
   479  # Determines whether container seccomp profiles are passed to the virtual
   480  # machine and applied by the kata agent. If set to true, seccomp is not applied
   481  # within the guest
   482  # (default: true)
   483  disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
   484  
   485  # If enabled, the runtime will create opentracing.io traces and spans.
   486  # (See https://www.jaegertracing.io/docs/getting-started).
   487  # (default: disabled)
   488  #enable_tracing = true
   489  
   490  # If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
   491  # This option may have some potential impacts to your host. It should only be used when you know what you're doing.
   492  # `disable_new_netns` conflicts with `enable_netmon`
   493  # `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
   494  # with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
   495  # (like OVS) directly.
   496  # If you are using docker, `disable_new_netns` only works with `docker run --net=none`
   497  # (default: false)
   498  #disable_new_netns = true
   499  
   500  # if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
   501  # The container cgroups in the host are not created, just one single cgroup per sandbox.
   502  # The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
   503  # The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
   504  # The sandbox cgroup is constrained if there is no container type annotation.
   505  # See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType
   506  sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
   507  
   508  # Enabled experimental feature list, format: ["a", "b"].
   509  # Experimental features are features not stable enough for production,
   510  # they may break compatibility, and are prepared for a big version bump.
   511  # Supported experimental features:
   512  # (default: [])
   513  experimental=@DEFAULTEXPFEATURES@
   514  
   515  
   516  # If enabled, containers are allowed to join the pid namespace of the agent
   517  # when the env variable KATA_AGENT_PIDNS is set for a container.
   518  # Use this with caution and only when required, as this option allows the container
   519  # to access the agent process. It is recommended to enable this option
    520  # only in debug scenarios and with containers with lowered privileges.
   521  #enable_agent_pidns = true