gitee.com/leisunstar/runtime@v0.0.0-20200521203717-5cef3e7b53f9/cli/config/configuration-qemu-virtiofs.toml.in (about)

     1  # Copyright (c) 2017-2019 Intel Corporation
     2  #
     3  # SPDX-License-Identifier: Apache-2.0
     4  #
     5  
     6  # XXX: WARNING: this file is auto-generated.
     7  # XXX:
     8  # XXX: Source file: "@CONFIG_QEMU_VIRTIOFS_IN@"
     9  # XXX: Project:
    10  # XXX:   Name: @PROJECT_NAME@
    11  # XXX:   Type: @PROJECT_TYPE@
    12  
    13  [hypervisor.qemu]
    14  path = "@QEMUVIRTIOFSPATH@"
    15  kernel = "@KERNELVIRTIOFSPATH@"
    16  image = "@IMAGEPATH@"
    17  machine_type = "@MACHINETYPE@"
    18  
    19  # Optional space-separated list of options to pass to the guest kernel.
    20  # For example, use `kernel_params = "vsyscall=emulate"` if you are having
    21  # trouble running pre-2.15 glibc.
    22  #
    23  # WARNING: - any parameter specified here will take priority over the default
    24  # parameter value of the same name used to start the virtual machine.
    25  # Do not set values here unless you understand the impact of doing so as you
    26  # may stop the virtual machine from booting.
    27  # To see the list of default parameters, enable hypervisor debug, create a
    28  # container and look for 'default-kernel-parameters' log entries.
    29  kernel_params = "@KERNELPARAMS@"
    30  
    31  # Path to the firmware.
     32  # If you want qemu to use the default firmware, leave this option empty
    33  firmware = "@FIRMWAREPATH@"
    34  
    35  # Machine accelerators
    36  # comma-separated list of machine accelerators to pass to the hypervisor.
    37  # For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
    38  machine_accelerators="@MACHINEACCELERATORS@"
    39  
    40  # CPU features
    41  # comma-separated list of cpu features to pass to the cpu
     42  # For example, `cpu_features = "pmu=off,vmx=off"`
    43  cpu_features="@CPUFEATURES@"
    44  
    45  # Default number of vCPUs per SB/VM:
    46  # unspecified or 0                --> will be set to @DEFVCPUS@
    47  # < 0                             --> will be set to the actual number of physical cores
    48  # > 0 <= number of physical cores --> will be set to the specified number
    49  # > number of physical cores      --> will be set to the actual number of physical cores
    50  default_vcpus = 1
    51  
    52  # Default maximum number of vCPUs per SB/VM:
    53  # unspecified or == 0             --> will be set to the actual number of physical cores or to the maximum number
    54  #                                     of vCPUs supported by KVM if that number is exceeded
    55  # > 0 <= number of physical cores --> will be set to the specified number
    56  # > number of physical cores      --> will be set to the actual number of physical cores or to the maximum number
    57  #                                     of vCPUs supported by KVM if that number is exceeded
     58  # WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
    59  # the actual number of physical cores is greater than it.
     60  # WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
     61  # hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
     62  # can be added to a SB/VM, but the memory footprint will be big. Another example, with
     63  # `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
     64  # vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
     65  # unless you know what you are doing.
    66  # NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
    67  default_maxvcpus = @DEFMAXVCPUS@
    68  
    69  # Bridges can be used to hot plug devices.
    70  # Limitations:
    71  # * Currently only pci bridges are supported
     72  # * Up to 30 devices per bridge can be hot plugged.
     73  # * Up to 5 PCI bridges can be cold plugged per VM.
    74  #   This limitation could be a bug in qemu or in the kernel
    75  # Default number of bridges per SB/VM:
    76  # unspecified or 0   --> will be set to @DEFBRIDGES@
    77  # > 1 <= 5           --> will be set to the specified number
    78  # > 5                --> will be set to 5
    79  default_bridges = @DEFBRIDGES@
    80  
    81  # Default memory size in MiB for SB/VM.
     82  # If unspecified then it will be set to @DEFMEMSZ@ MiB.
    83  default_memory = @DEFMEMSZ@
    84  #
    85  # Default memory slots per SB/VM.
     86  # If unspecified then it will be set to @DEFMEMSLOTS@.
     87  # This will determine the number of times that memory can be hot-added to the sandbox/VM.
    88  #memory_slots = @DEFMEMSLOTS@
    89  
     90  # This size in MiB will be added to the hypervisor's maximum memory.
     91  # It is the memory address space for the NVDIMM device.
    92  # If set block storage driver (block_device_driver) to "nvdimm",
    93  # should set memory_offset to the size of block device.
    94  # Default 0
    95  #memory_offset = 0
    96  
    97  # Disable block device from being used for a container's rootfs.
    98  # In case of a storage driver like devicemapper where a container's
    99  # root file system is backed by a block device, the block device is passed
   100  # directly to the hypervisor for performance reasons.
   101  # This flag prevents the block device from being passed to the hypervisor,
   102  # 9pfs is used instead to pass the rootfs.
   103  disable_block_device_use = @DEFDISABLEBLOCK@
   104  
   105  # Shared file system type:
   106  #   - virtio-fs (default)
   107  #   - virtio-9p
   108  shared_fs = "@DEFSHAREDFS_QEMU_VIRTIOFS@"
   109  
   110  # Path to vhost-user-fs daemon.
   111  virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
   112  
   113  # Default size of DAX cache in MiB
   114  virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
   115  
   116  # Extra args for virtiofsd daemon
   117  #
   118  # Format example:
   119  #   ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
   120  #
   121  # see `virtiofsd -h` for possible options.
   122  virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
   123  
   124  # Cache mode:
   125  #
   126  #  - none
   127  #    Metadata, data, and pathname lookup are not cached in guest. They are
   128  #    always fetched from host and any changes are immediately pushed to host.
   129  #
   130  #  - auto
   131  #    Metadata and pathname lookup cache expires after a configured amount of
   132  #    time (default is 1 second). Data is cached while the file is open (close
   133  #    to open consistency).
   134  #
   135  #  - always
   136  #    Metadata, data, and pathname lookup are cached in guest and never expire.
   137  virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
   138  
   139  # Block storage driver to be used for the hypervisor in case the container
   140  # rootfs is backed by a block device. This is virtio-scsi, virtio-blk
   141  # or nvdimm.
   142  block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@"
   143  
   144  # Specifies cache-related options will be set to block devices or not.
   145  # Default false
   146  #block_device_cache_set = true
   147  
   148  # Specifies cache-related options for block devices.
   149  # Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
   150  # Default false
   151  #block_device_cache_direct = true
   152  
   153  # Specifies cache-related options for block devices.
   154  # Denotes whether flush requests for the device are ignored.
   155  # Default false
   156  #block_device_cache_noflush = true
   157  
   158  # Enable iothreads (data-plane) to be used. This causes IO to be
   159  # handled in a separate IO thread. This is currently only implemented
   160  # for SCSI.
   161  #
   162  enable_iothreads = @DEFENABLEIOTHREADS@
   163  
   164  # Enable pre allocation of VM RAM, default false
   165  # Enabling this will result in lower container density
   166  # as all of the memory will be allocated and locked
   167  # This is useful when you want to reserve all the memory
   168  # upfront or in the cases where you want memory latencies
   169  # to be very predictable
   170  # Default false
   171  #enable_mem_prealloc = true
   172  
   173  # Enable huge pages for VM RAM, default false
   174  # Enabling this will result in the VM memory
   175  # being allocated using huge pages.
   176  # This is useful when you want to use vhost-user network
   177  # stacks within the container. This will automatically
   178  # result in memory pre allocation
   179  #enable_hugepages = true
   180  
   181  # Enable vhost-user storage device, default false
   182  # Enabling this will result in some Linux reserved block type
   183  # major range 240-254 being chosen to represent vhost-user devices.
   184  enable_vhost_user_store = @DEFENABLEVHOSTUSERSTORE@
   185  
   186  # The base directory specifically used for vhost-user devices.
   187  # Its sub-path "block" is used for block devices; "block/sockets" is
   188  # where we expect vhost-user sockets to live; "block/devices" is where
   189  # simulated block device nodes for vhost-user devices to live.
   190  vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
   191  
   192  # Enable file based guest memory support. The default is an empty string which
   193  # will disable this feature. In the case of virtio-fs, this is enabled
   194  # automatically and '/dev/shm' is used as the backing folder.
   195  # This option will be ignored if VM templating is enabled.
   196  #file_mem_backend = ""
   197  
   198  # Enable swap of vm memory. Default false.
   199  # The behaviour is undefined if mem_prealloc is also set to true
   200  #enable_swap = true
   201  
   202  # This option changes the default hypervisor and kernel parameters
   203  # to enable debug output where available. This extra output is added
   204  # to the proxy logs, but only when proxy debug is also enabled.
   205  #
   206  # Default false
   207  #enable_debug = true
   208  
   209  # Disable the customizations done in the runtime when it detects
   210  # that it is running on top a VMM. This will result in the runtime
   211  # behaving as it would when running on bare metal.
   212  #
   213  #disable_nesting_checks = true
   214  
   215  # This is the msize used for 9p shares. It is the number of bytes
   216  # used for 9p packet payload.
   217  #msize_9p = @DEFMSIZE9P@
   218  
   219  # If true and vsocks are supported, use vsocks to communicate directly
   220  # with the agent and no proxy is started, otherwise use unix
   221  # sockets and start a proxy to communicate with the agent.
   222  # Default false
   223  #use_vsock = true
   224  
   225  # If false and nvdimm is supported, use nvdimm device to plug guest image.
   226  # Otherwise virtio-block device is used.
   227  # Default false
   228  #disable_image_nvdimm = true
   229  
   230  # VFIO devices are hotplugged on a bridge by default.
   231  # Enable hotplugging on root bus. This may be required for devices with
   232  # a large PCI bar, as this is a current limitation with hotplugging on
   233  # a bridge. This value is valid for "pc" machine type.
   234  # Default false
   235  #hotplug_vfio_on_root_bus = true
   236  
   237  # If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
    238  # security (vhost-net runs in ring0) for network I/O performance.
   239  #disable_vhost_net = true
   240  
   241  #
   242  # Default entropy source.
   243  # The path to a host source of entropy (including a real hardware RNG)
   244  # /dev/urandom and /dev/random are two main options.
   245  # Be aware that /dev/random is a blocking source of entropy.  If the host
   246  # runs out of entropy, the VMs boot time will increase leading to get startup
   247  # timeouts.
   248  # The source of entropy /dev/urandom is non-blocking and provides a
   249  # generally acceptable source of entropy. It should work well for pretty much
   250  # all practical purposes.
   251  #entropy_source= "@DEFENTROPYSOURCE@"
   252  
   253  # Path to OCI hook binaries in the *guest rootfs*.
   254  # This does not affect host-side hooks which must instead be added to
   255  # the OCI spec passed to the runtime.
   256  #
   257  # You can create a rootfs with hooks by customizing the osbuilder scripts:
   258  # https://github.com/kata-containers/osbuilder
   259  #
   260  # Hooks must be stored in a subdirectory of guest_hook_path according to their
    261  # hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
   262  # The agent will scan these directories for executable files and add them, in
   263  # lexicographical order, to the lifecycle of the guest container.
   264  # Hooks are executed in the runtime namespace of the guest. See the official documentation:
   265  # https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
    266  # Warnings will be logged if any error is encountered while scanning for hooks,
   267  # but it will not abort container execution.
   268  #guest_hook_path = "/usr/share/oci/hooks"
   269  
   270  [factory]
   271  # VM templating support. Once enabled, new VMs are created from template
   272  # using vm cloning. They will share the same initial kernel, initramfs and
   273  # agent memory by mapping it readonly. It helps speeding up new container
   274  # creation and saves a lot of memory if there are many kata containers running
   275  # on the same host.
   276  #
   277  # When disabled, new VMs are created from scratch.
   278  #
   279  # Note: Requires "initrd=" to be set ("image=" is not supported).
   280  #
   281  # Default false
   282  #enable_template = true
   283  
   284  # Specifies the path of template.
   285  #
   286  # Default "/run/vc/vm/template"
   287  #template_path = "/run/vc/vm/template"
   288  
   289  # The number of caches of VMCache:
   290  # unspecified or == 0   --> VMCache is disabled
   291  # > 0                   --> will be set to the specified number
   292  #
   293  # VMCache is a function that creates VMs as caches before using it.
   294  # It helps speed up new container creation.
   295  # The function consists of a server and some clients communicating
   296  # through Unix socket.  The protocol is gRPC in protocols/cache/cache.proto.
   297  # The VMCache server will create some VMs and cache them by factory cache.
    298  # It will convert the VM to gRPC format and transport it when it gets
    299  # requests from clients.
   300  # Factory grpccache is the VMCache client.  It will request gRPC format
   301  # VM and convert it back to a VM.  If VMCache function is enabled,
   302  # kata-runtime will request VM from factory grpccache when it creates
   303  # a new sandbox.
   304  #
   305  # Default 0
   306  #vm_cache_number = 0
   307  
   308  # Specify the address of the Unix socket that is used by VMCache.
   309  #
   310  # Default /var/run/kata-containers/cache.sock
   311  #vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
   312  
   313  [proxy.@PROJECT_TYPE@]
   314  path = "@PROXYPATH@"
   315  
   316  # If enabled, proxy messages will be sent to the system log
   317  # (default: disabled)
   318  #enable_debug = true
   319  
   320  [shim.@PROJECT_TYPE@]
   321  path = "@SHIMPATH@"
   322  
   323  # If enabled, shim messages will be sent to the system log
   324  # (default: disabled)
   325  #enable_debug = true
   326  
   327  # If enabled, the shim will create opentracing.io traces and spans.
   328  # (See https://www.jaegertracing.io/docs/getting-started).
   329  #
   330  # Note: By default, the shim runs in a separate network namespace. Therefore,
   331  # to allow it to send trace details to the Jaeger agent running on the host,
   332  # it is necessary to set 'disable_new_netns=true' so that it runs in the host
   333  # network namespace.
   334  #
   335  # (default: disabled)
   336  #enable_tracing = true
   337  
   338  [agent.@PROJECT_TYPE@]
   339  # If enabled, make the agent display debug-level messages.
   340  # (default: disabled)
   341  #enable_debug = true
   342  
   343  # Enable agent tracing.
   344  #
   345  # If enabled, the default trace mode is "dynamic" and the
   346  # default trace type is "isolated". The trace mode and type are set
    347  # explicitly with the `trace_type=` and `trace_mode=` options.
   348  #
   349  # Notes:
   350  #
   351  # - Tracing is ONLY enabled when `enable_tracing` is set: explicitly
   352  #   setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing`
   353  #   will NOT activate agent tracing.
   354  #
   355  # - See https://github.com/kata-containers/agent/blob/master/TRACING.md for
   356  #   full details.
   357  #
   358  # (default: disabled)
   359  #enable_tracing = true
   360  #
   361  #trace_mode = "dynamic"
   362  #trace_type = "isolated"
   363  
   364  # Comma separated list of kernel modules and their parameters.
   365  # These modules will be loaded in the guest kernel using modprobe(8).
   366  # The following example can be used to load two kernel modules with parameters
   367  #  - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
   368  # The first word is considered as the module name and the rest as its parameters.
   369  # Container will not be started when:
   370  #  * A kernel module is specified and the modprobe command is not installed in the guest
   371  #    or it fails loading the module.
    372  #  * The module is not available in the guest or it doesn't meet the guest kernel
   373  #    requirements, like architecture and version.
   374  #
   375  kernel_modules=[]
   376  
   377  
   378  [netmon]
   379  # If enabled, the network monitoring process gets started when the
   380  # sandbox is created. This allows for the detection of some additional
   381  # network being added to the existing network namespace, after the
   382  # sandbox has been created.
   383  # (default: disabled)
   384  #enable_netmon = true
   385  
   386  # Specify the path to the netmon binary.
   387  path = "@NETMONPATH@"
   388  
   389  # If enabled, netmon messages will be sent to the system log
   390  # (default: disabled)
   391  #enable_debug = true
   392  
   393  [runtime]
   394  # If enabled, the runtime will log additional debug messages to the
   395  # system log
   396  # (default: disabled)
   397  #enable_debug = true
   398  #
   399  # Internetworking model
    400  # Determines how the VM should be connected to
    401  # the container network interface
   402  # Options:
   403  #
   404  #   - bridged (Deprecated)
   405  #     Uses a linux bridge to interconnect the container interface to
   406  #     the VM. Works for most cases except macvlan and ipvlan.
   407  #     ***NOTE: This feature has been deprecated with plans to remove this
   408  #     feature in the future. Please use other network models listed below.
   409  #
   410  #   - macvtap
   411  #     Used when the Container network interface can be bridged using
   412  #     macvtap.
   413  #
   414  #   - none
   415  #     Used when customize network. Only creates a tap device. No veth pair.
   416  #
   417  #   - tcfilter
   418  #     Uses tc filter rules to redirect traffic from the network interface
   419  #     provided by plugin to a tap interface connected to the VM.
   420  #
   421  internetworking_model="@DEFNETWORKMODEL_QEMU@"
   422  
   423  # disable guest seccomp
   424  # Determines whether container seccomp profiles are passed to the virtual
   425  # machine and applied by the kata agent. If set to true, seccomp is not applied
   426  # within the guest
   427  # (default: true)
   428  disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
   429  
   430  # If enabled, the runtime will create opentracing.io traces and spans.
   431  # (See https://www.jaegertracing.io/docs/getting-started).
   432  # (default: disabled)
   433  #enable_tracing = true
   434  
   435  # If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
   436  # This option may have some potential impacts to your host. It should only be used when you know what you're doing.
   437  # `disable_new_netns` conflicts with `enable_netmon`
   438  # `disable_new_netns` conflicts with `internetworking_model=bridged` and `internetworking_model=macvtap`. It works only
   439  # with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
   440  # (like OVS) directly.
   441  # If you are using docker, `disable_new_netns` only works with `docker run --net=none`
   442  # (default: false)
   443  #disable_new_netns = true
   444  
   445  # if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
   446  # The container cgroups in the host are not created, just one single cgroup per sandbox.
   447  # The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
   448  # The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
   449  # The sandbox cgroup is constrained if there is no container type annotation.
   450  # See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType
   451  sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
   452  
   453  # Enabled experimental feature list, format: ["a", "b"].
   454  # Experimental features are features not stable enough for production,
   455  # they may break compatibility, and are prepared for a big version bump.
   456  # Supported experimental features:
   457  # (default: [])
   458  experimental=@DEFAULTEXPFEATURES@