gitee.com/leisunstar/runtime@v0.0.0-20200521203717-5cef3e7b53f9/cli/config/configuration-qemu.toml.in

# Copyright (c) 2017-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# XXX: WARNING: this file is auto-generated.
# XXX:
# XXX: Source file: "@CONFIG_QEMU_IN@"
# XXX: Project:
# XXX:   Name: @PROJECT_NAME@
# XXX:   Type: @PROJECT_TYPE@

[hypervisor.qemu]
path = "@QEMUPATH@"
kernel = "@KERNELPATH@"
initrd = "@INITRDPATH@"
image = "@IMAGEPATH@"
machine_type = "@MACHINETYPE@"

# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
# trouble running pre-2.15 glibc.
#
# WARNING: any parameter specified here will take priority over the default
# parameter value of the same name used to start the virtual machine.
# Do not set values here unless you understand the impact of doing so, as you
# may prevent the virtual machine from booting.
# To see the list of default parameters, enable hypervisor debug, create a
# container and look for 'default-kernel-parameters' log entries.
kernel_params = "@KERNELPARAMS@"

# Path to the firmware.
# If you want QEMU to use the default firmware, leave this option empty.
firmware = "@FIRMWAREPATH@"

# Machine accelerators
# Comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators="@MACHINEACCELERATORS@"

# CPU features
# Comma-separated list of CPU features to pass to the CPU.
# For example, `cpu_features = "pmu=off,vmx=off"`
cpu_features="@CPUFEATURES@"

# Default number of vCPUs per SB/VM:
# unspecified or 0                --> will be set to @DEFVCPUS@
# < 0                             --> will be set to the actual number of physical cores
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores
default_vcpus = 1
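# As an illustration of the mapping above (assuming, hypothetically, a host
# with 4 physical cores):
#   default_vcpus = 0   --> @DEFVCPUS@
#   default_vcpus = -1  --> 4
#   default_vcpus = 2   --> 2
#   default_vcpus = 8   --> 4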

# Default maximum number of vCPUs per SB/VM:
# unspecified or == 0             --> will be set to the actual number of physical cores or to the maximum number
#                                     of vCPUs supported by KVM if that number is exceeded
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores or to the maximum number
#                                     of vCPUs supported by KVM if that number is exceeded
# WARNING: Depending on the architecture, when the actual number of physical cores
# exceeds the maximum number of vCPUs supported by KVM, the KVM maximum is used.
# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
# can be added to a SB/VM, but the memory footprint will be large. As another example, with
# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable
# unless you know what you are doing.
# NOTICE: on ARM platforms with a GICv2 interrupt controller, set this to 8.
default_maxvcpus = @DEFMAXVCPUS@
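# As an illustration (not a default), a small fixed ceiling keeps the memory
# footprint low at the cost of limiting CPU hotplug:
#default_maxvcpus = 8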

# Bridges can be used to hot plug devices.
# Limitations:
# * Currently only PCI bridges are supported
# * Up to 30 devices per bridge can be hot plugged.
# * Up to 5 PCI bridges can be cold plugged per VM.
#   This limitation could be a bug in QEMU or in the kernel
# Default number of bridges per SB/VM:
# unspecified or 0   --> will be set to @DEFBRIDGES@
# > 0 <= 5           --> will be set to the specified number
# > 5                --> will be set to 5
default_bridges = @DEFBRIDGES@
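# As an illustration (not a default): since each bridge supports up to 30
# hot plugged devices, two bridges allow up to 60.
#default_bridges = 2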

# Default memory size in MiB for SB/VM.
# If unspecified then it will be set to @DEFMEMSZ@ MiB.
default_memory = @DEFMEMSZ@
#
# Default memory slots per SB/VM.
# If unspecified then it will be set to @DEFMEMSLOTS@.
# This determines how many times memory can be hot-added to the sandbox/VM.
#memory_slots = @DEFMEMSLOTS@

# The size in MiB that will be added to the maximum memory of the hypervisor.
# It is the memory address space for the NVDIMM device.
# If the block storage driver (block_device_driver) is set to "nvdimm",
# memory_offset should be set to the size of the block device.
# Default 0
#memory_offset = 0
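# As an illustration (the value is hypothetical): with block_device_driver
# set to "nvdimm" below and a 1024 MiB block device backing the rootfs:
#memory_offset = 1024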

# Specifies whether virtio-mem is enabled or not.
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
#enable_virtio_mem = true

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
# root file system is backed by a block device, the block device is passed
# directly to the hypervisor for performance reasons.
# This flag prevents the block device from being passed to the hypervisor;
# 9pfs is used instead to pass the rootfs.
disable_block_device_use = @DEFDISABLEBLOCK@

# Shared file system type:
#   - virtio-9p (default)
#   - virtio-fs
shared_fs = "@DEFSHAREDFS@"

# Path to vhost-user-fs daemon.
virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"

# Default size of DAX cache in MiB
virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@

# Extra args for virtiofsd daemon
#
# Format example:
#   ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
#
# see `virtiofsd -h` for possible options.
virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@

# Cache mode:
#
#  - none
#    Metadata, data, and pathname lookup are not cached in guest. They are
#    always fetched from host and any changes are immediately pushed to host.
#
#  - auto
#    Metadata and pathname lookup cache expires after a configured amount of
#    time (default is 1 second). Data is cached while the file is open (close
#    to open consistency).
#
#  - always
#    Metadata, data, and pathname lookup are cached in guest and never expire.
virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
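# As an illustrative (non-default) virtio-fs setup combining the options
# above; the daemon path and cache size are assumptions, not defaults:
#shared_fs = "virtio-fs"
#virtio_fs_daemon = "/usr/bin/virtiofsd"
#virtio_fs_cache_size = 1024
#virtio_fs_cache = "always"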

# Block storage driver to be used for the hypervisor in case the container
# rootfs is backed by a block device. Valid options are virtio-scsi,
# virtio-blk or nvdimm.
block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@"

# Specifies whether cache-related options are set for block devices.
# Default false
#block_device_cache_set = true

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypassing the host page cache) is enabled.
# Default false
#block_device_cache_direct = true

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
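# As an illustration (not defaults): virtio-blk with the host page cache
# bypassed via O_DIRECT:
#block_device_driver = "virtio-blk"
#block_device_cache_set = true
#block_device_cache_direct = true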

# Enable iothreads (data-plane). This causes IO to be
# handled in a separate IO thread. This is currently only implemented
# for SCSI.
#
enable_iothreads = @DEFENABLEIOTHREADS@

# Enable pre-allocation of VM RAM, default false
# Enabling this will result in lower container density,
# as all of the memory will be allocated and locked.
# This is useful when you want to reserve all the memory
# upfront or in the cases where you want memory latencies
# to be very predictable.
# Default false
#enable_mem_prealloc = true

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre-allocation.
#enable_hugepages = true

# Enable vhost-user storage device, default false
# Enabling this will result in a major number from the Linux reserved
# block type range 240-254 being chosen to represent vhost-user devices.
enable_vhost_user_store = @DEFENABLEVHOSTUSERSTORE@

# The base directory specifically used for vhost-user devices.
# Its sub-path "block" is used for block devices; "block/sockets" is
# where we expect vhost-user sockets to live; "block/devices" is where
# simulated block device nodes for vhost-user devices live.
vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
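# As an illustration of the layout described above (the base path is a
# placeholder for whatever vhost_user_store_path is set to):
#   <vhost_user_store_path>/block/sockets/  <-- vhost-user sockets
#   <vhost_user_store_path>/block/devices/  <-- simulated block device nodes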

# Enable file-based guest memory support. The default is an empty string which
# will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder.
# This option will be ignored if VM templating is enabled.
#file_mem_backend = ""

# Enable swap of VM memory. Default false.
# The behaviour is undefined if enable_mem_prealloc is also set to true.
#enable_swap = true

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available. This extra output is added
# to the proxy logs, but only when proxy debug is also enabled.
#
# Default false
#enable_debug = true

# Disable the customizations done in the runtime when it detects
# that it is running on top of a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true

# This is the msize used for 9p shares. It is the number of bytes
# used for the 9p packet payload.
#msize_9p = @DEFMSIZE9P@

# If true and vsocks are supported, use vsocks to communicate directly
# with the agent and no proxy is started; otherwise use Unix
# sockets and start a proxy to communicate with the agent.
# Default false
#use_vsock = true

# If false and nvdimm is supported, use the nvdimm device to plug the guest
# image. Otherwise a virtio-block device is used.
# Default is false
#disable_image_nvdimm = true

# VFIO devices are hotplugged on a bridge by default.
# Enable hotplugging on the root bus. This may be required for devices with
# a large PCI BAR, as this is a current limitation with hotplugging on
# a bridge. This value is valid for the "pc" machine type.
# Default false
#hotplug_vfio_on_root_bus = true

# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
# Use this parameter when using some large PCI BAR devices, such as an Nvidia GPU.
# The value is the number of pcie_root_port devices.
# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35".
# Default 0
#pcie_root_port = 2
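# As an illustrative (non-default) sketch for hot plugging a large PCI BAR
# device, with machine_type set to "q35" in this section above:
#hotplug_vfio_on_root_bus = true
#pcie_root_port = 2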

# If the vhost-net backend for virtio-net is not desired, set to true. Default is false,
# which trades off security (vhost-net runs in ring 0) for network I/O performance.
#disable_vhost_net = true

#
# Default entropy source.
# The path to a host source of entropy (including a real hardware RNG).
# /dev/urandom and /dev/random are the two main options.
# Be aware that /dev/random is a blocking source of entropy.  If the host
# runs out of entropy, the VM's boot time will increase, possibly leading to
# startup timeouts.
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
#
# You can create a rootfs with hooks by customizing the osbuilder scripts:
# https://github.com/kata-containers/osbuilder
#
# Hooks must be stored in a subdirectory of guest_hook_path according to their
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
# The agent will scan these directories for executable files and add them, in
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
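# As an illustration (the hook filename is hypothetical), a prestart hook
# would be installed in the guest rootfs as an executable file at:
#   /usr/share/oci/hooks/prestart/10-example-hook.sh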

[factory]
# VM templating support. Once enabled, new VMs are created from a template
# using VM cloning. They will share the same initial kernel, initramfs and
# agent memory by mapping it read-only. This helps speed up new container
# creation and saves a lot of memory if there are many Kata Containers running
# on the same host.
#
# When disabled, new VMs are created from scratch.
#
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0   --> VMCache is disabled
# > 0                   --> will be set to the specified number
#
# VMCache is a function that creates VMs as caches before they are used.
# It helps speed up new container creation.
# The function consists of a server and some clients communicating
# through a Unix socket.  The protocol is gRPC in protocols/cache/cache.proto.
# The VMCache server will create some VMs and cache them in the factory cache.
# It will convert a VM to gRPC format and transport it when it gets
# a request from a client.
# Factory grpccache is the VMCache client.  It will request a gRPC-format
# VM and convert it back to a VM.  If the VMCache function is enabled,
# kata-runtime will request a VM from factory grpccache when it creates
# a new sandbox.
#
# Default 0
#vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
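# As an illustrative (non-default) VMCache setup keeping two VMs cached;
# the endpoint shown is the documented default:
#vm_cache_number = 2
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"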

[proxy.@PROJECT_TYPE@]
path = "@PROXYPATH@"

# If enabled, proxy messages will be sent to the system log
# (default: disabled)
#enable_debug = true

[shim.@PROJECT_TYPE@]
path = "@SHIMPATH@"

# If enabled, shim messages will be sent to the system log
# (default: disabled)
#enable_debug = true

# If enabled, the shim will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
#
# Note: By default, the shim runs in a separate network namespace. Therefore,
# to allow it to send trace details to the Jaeger agent running on the host,
# it is necessary to set 'disable_new_netns=true' so that it runs in the host
# network namespace.
#
# (default: disabled)
#enable_tracing = true

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true

# Enable agent tracing.
#
# If enabled, the default trace mode is "dynamic" and the
# default trace type is "isolated". The trace mode and type can be set
# explicitly with the `trace_type=` and `trace_mode=` options.
#
# Notes:
#
# - Tracing is ONLY enabled when `enable_tracing` is set: explicitly
#   setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing`
#   will NOT activate agent tracing.
#
# - See https://github.com/kata-containers/agent/blob/master/TRACING.md for
#   full details.
#
# (default: disabled)
#enable_tracing = true
#
#trace_mode = "dynamic"
#trace_type = "isolated"

# Comma-separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
# The following example can be used to load two kernel modules with parameters:
#  - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
# The first word is considered the module name and the rest its parameters.
# The container will not be started when:
#  * A kernel module is specified and the modprobe command is not installed in the guest
#    or it fails to load the module.
#  * The module is not available in the guest or it does not meet the guest kernel's
#    requirements, such as architecture and version.
#
kernel_modules=[]
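# As an illustration (taken from the example above; module availability
# depends on the guest kernel):
#kernel_modules=["i915 enable_ppgtt=0"]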


[netmon]
# If enabled, the network monitoring process gets started when the
# sandbox is created. This allows for the detection of additional
# networks being added to the existing network namespace after the
# sandbox has been created.
# (default: disabled)
#enable_netmon = true

# Specify the path to the netmon binary.
path = "@NETMONPATH@"

# If enabled, netmon messages will be sent to the system log
# (default: disabled)
#enable_debug = true

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
#
# Internetworking model
# Determines how the VM should be connected to
# the container network interface
# Options:
#
#   - macvtap
#     Used when the container network interface can be bridged using
#     macvtap.
#
#   - none
#     Used for a customized network. Only creates a tap device. No veth pair.
#
#   - tcfilter
#     Uses tc filter rules to redirect traffic from the network interface
#     provided by the plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_QEMU@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest.
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts on your host. It should only be used when you know what you're doing.
# `disable_new_netns` conflicts with `enable_netmon`.
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`.
# (default: false)
#disable_new_netns = true
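# As an illustrative (non-default) combination for connecting the tap device
# directly to a host bridge, per the note above:
#internetworking_model="none"
#disable_new_netns = true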

# If enabled, the runtime will add all the Kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features that are not stable enough for production;
# they may break compatibility and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@