build-*
dist/*
docs/html/
+docs/man/xl.cfg.pod.5
+docs/man/xl.pod.1
docs/man1/
docs/man5/
docs/man8/
POD2HTML
POD2MAN
FIG2DEV
+XEN_DUMP_DIR
+XEN_PAGING_DIR
+XEN_LOCK_DIR
+XEN_SCRIPT_DIR
+XEN_CONFIG_DIR
+INITD_DIR
+CONFIG_DIR
+SHAREDIR
+XEN_LIB_DIR
+XEN_LIB_STORED
+XEN_LOG_DIR
+XEN_RUN_DIR
+XENFIRMWAREDIR
+LIBEXEC_INC
+LIBEXEC_LIB
+LIBEXEC_BIN
+LIBEXEC
+CONFIG_LEAF_DIR
target_alias
host_alias
build_alias
ac_subst_files=''
ac_user_opts='
enable_option_checking
+with_initddir
+with_sysconfig_leaf_dir
+with_libexec_leaf_dir
+with_xen_dumpdir
'
ac_precious_vars='build_alias
host_alias
esac
cat <<\_ACEOF
+Optional Packages:
+ --with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
+ --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --with-initddir=DIR Path to directory with sysv runlevel scripts.
+ [SYSCONFDIR/init.d]
+ --with-sysconfig-leaf-dir=SUBDIR
+ Name of subdirectory in /etc to store runtime
+ options for runlevel scripts and daemons such as
+ xenstored. This should be either "sysconfig" or
+ "default". [sysconfig]
+ --with-libexec-leaf-dir=SUBDIR
+ Name of subdirectory in libexecdir to use.
+ --with-xen-dumpdir=DIR Path to directory for domU crash dumps.
+ [LOCALSTATEDIR/lib/xen/dump]
+
Some influential environment variables:
FIG2DEV Path to fig2dev tool
POD2MAN Path to pod2man tool
-ac_config_files="$ac_config_files ../config/Docs.mk"
+ac_config_files="$ac_config_files ../config/Docs.mk man/xl.cfg.pod.5 man/xl.pod.1"
ac_aux_dir=
for ac_dir in ../ "$srcdir"/../; do
+
+test "x$prefix" = "xNONE" && prefix=$ac_default_prefix
+test "x$exec_prefix" = "xNONE" && exec_prefix=${prefix}
+
+if test "$localstatedir" = '${prefix}/var' ; then
+ localstatedir=/var
+fi
+
+bindir=`eval echo $bindir`
+sbindir=`eval echo $sbindir`
+libdir=`eval echo $libdir`
+
+if test "x$sysconfdir" = 'x${prefix}/etc' ; then
+ case "$host_os" in
+ *freebsd*)
+ sysconfdir=$prefix/etc
+ ;;
+ *solaris*)
+ if test "$prefix" = "/usr" ; then
+ sysconfdir=/etc
+ else
+ sysconfdir=$prefix/etc
+ fi
+ ;;
+ *)
+ sysconfdir=/etc
+ ;;
+ esac
+fi
+
+
+# Check whether --with-initddir was given.
+if test "${with_initddir+set}" = set; then :
+ withval=$with_initddir; initddir_path=$withval
+else
+ case "$host_os" in
+ *linux*)
+ if test -d $sysconfdir/rc.d/init.d ; then
+ initddir_path=$sysconfdir/rc.d/init.d
+ else
+ initddir_path=$sysconfdir/init.d
+ fi
+ ;;
+ *)
+ initddir_path=$sysconfdir/rc.d
+ ;;
+ esac
+fi
+
+
+
+# Check whether --with-sysconfig-leaf-dir was given.
+if test "${with_sysconfig_leaf_dir+set}" = set; then :
+ withval=$with_sysconfig_leaf_dir; config_leaf_dir=$withval
+else
+ config_leaf_dir=sysconfig
+ if test ! -d /etc/sysconfig ; then config_leaf_dir=default ; fi
+fi
+
+CONFIG_LEAF_DIR=$config_leaf_dir
+
+
+
+# Check whether --with-libexec-leaf-dir was given.
+if test "${with_libexec_leaf_dir+set}" = set; then :
+ withval=$with_libexec_leaf_dir; libexec_subdir=$withval
+else
+ libexec_subdir=$PACKAGE_TARNAME
+fi
+
+
+
+# Check whether --with-xen-dumpdir was given.
+if test "${with_xen_dumpdir+set}" = set; then :
+ withval=$with_xen_dumpdir; xen_dumpdir_path=$withval
+else
+ xen_dumpdir_path=$localstatedir/lib/xen/dump
+fi
+
+
+if test "$libexecdir" = '${exec_prefix}/libexec' ; then
+ case "$host_os" in
+ *netbsd*) ;;
+ *)
+ libexecdir='${exec_prefix}/lib'
+ ;;
+ esac
+fi
+LIBEXEC=`eval echo $libexecdir/$libexec_subdir`
+
+
+LIBEXEC_BIN=${LIBEXEC}/bin
+
+LIBEXEC_LIB=${LIBEXEC}/lib
+
+LIBEXEC_INC=${LIBEXEC}/include
+
+XENFIRMWAREDIR=${LIBEXEC}/boot
+
+
+XEN_RUN_DIR=$localstatedir/run/xen
+
+
+XEN_LOG_DIR=$localstatedir/log/xen
+
+
+XEN_LIB_STORED=$localstatedir/lib/xenstored
+
+
+XEN_LIB_DIR=$localstatedir/lib/xen
+
+
+SHAREDIR=$prefix/share
+
+
+CONFIG_DIR=$sysconfdir
+
+
+INITD_DIR=$initddir_path
+
+
+XEN_CONFIG_DIR=$CONFIG_DIR/xen
+
+
+XEN_SCRIPT_DIR=$XEN_CONFIG_DIR/scripts
+
+
+case "$host_os" in
+*freebsd*) XEN_LOCK_DIR=$localstatedir/lib ;;
+*netbsd*) XEN_LOCK_DIR=$localstatedir/lib ;;
+*) XEN_LOCK_DIR=$localstatedir/lock ;;
+esac
+
+
+XEN_PAGING_DIR=$localstatedir/lib/xen/xenpaging
+
+
+XEN_DUMP_DIR=$xen_dumpdir_path
+
+
+
+
+
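+# Illustrative note (hypothetical values): with --prefix=/usr on Linux,
+# the expansions above give sysconfdir=/etc, XEN_RUN_DIR=/var/run/xen,
+# LIBEXEC=/usr/lib/xen and XEN_DUMP_DIR=/var/lib/xen/dump.
+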
# Extract the first word of "fig2dev", so it can be a program name with args.
set dummy fig2dev; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
do
case $ac_config_target in
"../config/Docs.mk") CONFIG_FILES="$CONFIG_FILES ../config/Docs.mk" ;;
+ "man/xl.cfg.pod.5") CONFIG_FILES="$CONFIG_FILES man/xl.cfg.pod.5" ;;
+ "man/xl.pod.1") CONFIG_FILES="$CONFIG_FILES man/xl.pod.1" ;;
*) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
AC_INIT([Xen Hypervisor Documentation], m4_esyscmd([../version.sh ../xen/Makefile]),
[xen-devel@lists.xen.org], [xen], [http://www.xen.org/])
AC_CONFIG_SRCDIR([misc/xen-command-line.markdown])
-AC_CONFIG_FILES([../config/Docs.mk])
+AC_CONFIG_FILES([
+../config/Docs.mk
+man/xl.cfg.pod.5
+man/xl.pod.1
+])
AC_CONFIG_AUX_DIR([../])
# M4 Macro includes
m4_include([../m4/docs_tool.m4])
m4_include([../m4/path_or_fail.m4])
m4_include([../m4/features.m4])
+m4_include([../m4/paths.m4])
+
+AX_XEN_EXPAND_CONFIG()
AX_DOCS_TOOL_PROG([FIG2DEV], [fig2dev])
AX_DOCS_TOOL_PROG([POD2MAN], [pod2man])
+++ /dev/null
-=head1 NAME
-
-xl.cfg - XL Domain Configuration File Syntax
-
-=head1 SYNOPSIS
-
- /etc/xen/xldomain
-
-=head1 DESCRIPTION
-
-To create a VM (a domain in Xen terminology, sometimes called a guest)
-with xl requires the provision of a domain config file. Typically
-these live in F</etc/xen/DOMAIN.cfg>, where DOMAIN is the name of the
-domain.
-
-=head1 SYNTAX
-
-A domain config file consists of a series of C<KEY=VALUE> pairs.
-
-Some C<KEY>s are mandatory, others are general options which apply to
-any guest type while others relate only to specific guest types
-(e.g. PV or HVM guests).
-
-A value C<VALUE> is one of:
-
-=over 4
-
-=item B<"STRING">
-
-A string, surrounded by either single or double quotes.
-
-=item B<NUMBER>
-
-A number, in either decimal, octal (using a C<0> prefix) or
-hexadecimal (using an C<0x> prefix).
-
-=item B<BOOLEAN>
-
-A C<NUMBER> interpreted as C<False> (C<0>) or C<True> (any other
-value).
-
-=item B<[ VALUE, VALUE, ... ]>
-
-A list of C<VALUE>s of the above types. Lists can be heterogeneous and
-nested.
-
-=back
-
-The semantics of each C<KEY> defines which form of C<VALUE> is required.
-
-Pairs may be separated either by a newline or a semicolon. Both
-of the following are valid:
-
- name="h0"
- builder="hvm"
-
- name="h0"; builder="hvm"
-
-=head1 OPTIONS
-
-=head2 Mandatory Configuration Items
-
-The following key is mandatory for any guest type:
-
-=over 4
-
-=item B<name="NAME">
-
-Specifies the name of the domain. Names of domains existing on a
-single host must be unique.
-
-=back
-
-=head2 Selecting Guest Type
-
-=over 4
-
-=item B<builder="generic">
-
-Specifies that this is to be a PV domain. This is the default.
-
-=item B<builder="hvm">
-
-Specifies that this is to be an HVM domain. That is, a fully
-virtualised computer with emulated BIOS, disk and network peripherals,
-etc. The default is a PV domain, suitable for hosting Xen-aware guest
-operating systems.
-
-=back
-
-=head2 General Options
-
-The following options apply to guests of any type.
-
-=head3 CPU Allocation
-
-=over 4
-
-=item B<pool="CPUPOOLNAME">
-
-Put the guest's vcpus into the named cpu pool.
-
-=item B<vcpus=N>
-
-Start the guest with N vcpus initially online.
-
-=item B<maxvcpus=M>
-
-Allow the guest to bring up a maximum of M vcpus. At start of day if
-B<vcpus=N> is less than B<maxvcpus=M> then the first N vcpus will be
-created online and the remainder will be offline.
-
-=item B<cpus="CPU-LIST">
-
-List of which cpus the guest is allowed to use. Default is no pinning at
-all (more on this below). A C<CPU-LIST> may be specified as follows:
-
-=over 4
-
-=item "all"
-
-To allow all the vcpus of the guest to run on all the cpus on the host.
-
-=item "0-3,5,^1"
-
-To allow all the vcpus of the guest to run on cpus 0,2,3,5. Combining
-this with "all" is possible, meaning "all,^7" results in all the vcpus
-of the guest running on all the cpus on the host except cpu 7.
-
-=item "nodes:0-3,node:^2"
-
-To allow all the vcpus of the guest to run on the cpus from NUMA nodes
-0,1,3 of the host. So, if cpus 0-3 belong to node 0, cpus 4-7 belong
-to node 1 and cpus 8-11 to node 3, the above would mean all the vcpus
-of the guest will run on cpus 0-3,8-11.
-
-Combining this notation with the one above is possible. For instance,
-"1,node:2,^6", means all the vcpus of the guest will run on cpu 1 and
-on all the cpus of NUMA node 2, but not on cpu 6. Following the same
-example as above, that would be cpus 1,4,5,7.
-
-Combining this with "all" is also possible, meaning "all,^nodes:1"
-results in all the vcpus of the guest running on all the cpus on the
-host, except for the cpus belonging to the host NUMA node 1.
-
-=item ["2", "3-8,^5"]
-
-To ask for specific vcpu mapping. That means (in this example), vcpu 0
-of the guest will run on cpu 2 of the host and vcpu 1 of the guest will
-run on cpus 3,4,6,7,8 of the host.
-
-More complex notation can also be used, exactly as described above. So
-"all,^5-8", or just "all", or "node:0,node:2,^9-11,18-20" are all legal,
-for each element of the list.
-
-=back
-
-If this option is not specified, no vcpu to cpu pinning is established,
-and the vcpus of the guest can run on all the cpus of the host. If this
-option is specified, the intersection of the vcpu pinning mask, provided
-here, and the soft affinity mask, provided via B<cpus_soft=> (if any),
-is utilized to compute the domain node-affinity, for driving memory
-allocations.
-
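-As an illustrative sketch (the values are hypothetical), pinning the
-guest's vcpus to NUMA node 0 while avoiding cpu 3 might look like:
-
-    vcpus = 4
-    cpus  = "node:0,^3"
-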
-=item B<cpus_soft="CPU-LIST">
-
-Exactly as B<cpus=>, but specifies soft affinity, rather than pinning
-(hard affinity). When using the credit scheduler, this means what cpus
-the vcpus of the domain prefer.
-
-A C<CPU-LIST> is specified exactly as above, for B<cpus=>.
-
-If this option is not specified, the vcpus of the guest will not have
-any preference regarding on what cpu to run. If this option is specified,
-the intersection of the soft affinity mask, provided here, and the vcpu
-pinning, provided via B<cpus=> (if any), is utilized to compute the
-domain node-affinity, for driving memory allocations.
-
-If this option is not specified (and B<cpus=> is not specified either),
-libxl automatically tries to place the guest on the fewest possible
-nodes. A heuristic approach is used for choosing the best
-node (or set of nodes), with the goal of maximizing performance for
-the guest and, at the same time, achieving efficient utilization of
-host cpus and memory. In that case, the soft affinity of all the vcpus
-of the domain will be set to the pcpus belonging to the NUMA nodes
-chosen during placement.
-
-For more details, see F<docs/misc/xl-numa-placement.markdown>.
-
-=back
-
-=head3 CPU Scheduling
-
-=over 4
-
-=item B<cpu_weight=WEIGHT>
-
-A domain with a weight of 512 will get twice as much CPU as a domain
-with a weight of 256 on a contended host.
-Legal weights range from 1 to 65535 and the default is 256.
-Honoured by the credit and credit2 schedulers.
-
-=item B<cap=N>
-
-The cap optionally fixes the maximum amount of CPU a domain will be
-able to consume, even if the host system has idle CPU cycles.
-The cap is expressed in percentage of one physical CPU:
-100 is 1 physical CPU, 50 is half a CPU, 400 is 4 CPUs, etc.
-The default, 0, means there is no upper cap.
-Honoured by the credit and credit2 schedulers.
-
-NB: Many systems have features that will scale down the computing
-power of a cpu that is not 100% utilized. This can be in the
-operating system, but can also sometimes be below the operating system
-in the BIOS. If you set a cap such that individual cores are running
-at less than 100%, this may have an impact on the performance of your
-workload over and above the impact of the cap. For example, if your
-processor runs at 2GHz, and you cap a vm at 50%, the power management
-system may also reduce the clock speed to 1GHz; the effect will be
-that your VM gets 25% of the available power (50% of 1GHz) rather than
-50% (50% of 2GHz). If you are not getting the performance you expect,
-look at performance and cpufreq options in your operating system and
-your BIOS.
-
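-For example (illustrative values), giving a domain twice the default
-weight while capping it at one and a half physical CPUs might look like:
-
-    cpu_weight = 512
-    cap = 150
-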
-=back
-
-=head3 Memory Allocation
-
-=over 4
-
-=item B<memory=MBYTES>
-
-Start the guest with MBYTES megabytes of RAM.
-
-=item B<maxmem=MBYTES>
-
-Specifies the maximum amount of memory a guest can ever see.
-The value of B<maxmem=> must be equal or greater than B<memory=>.
-
-In combination with B<memory=> it will start the guest "pre-ballooned",
-if the values of B<memory=> and B<maxmem=> differ.
-A "pre-ballooned" HVM guest needs a balloon driver, without a balloon driver
-it will crash.
-
-NOTE: Because of the way ballooning works, the guest has to allocate
-memory to keep track of maxmem pages, regardless of how much memory it
-actually has available to it. A guest with maxmem=262144 and
-memory=8096 will report significantly less memory available for use
-than a system with maxmem=8096 memory=8096 due to the memory overhead
-of having to track the unused pages.
-
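-A minimal sketch (sizes are illustrative): start the guest with 1024MB
-of RAM, allowing it to balloon up to 2048MB:
-
-    memory = 1024
-    maxmem = 2048
-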
-=back
-
-=head3 Guest Virtual NUMA Configuration
-
-=over 4
-
-=item B<vnuma=[ VNODE_SPEC, VNODE_SPEC, ... ]>
-
-Specify virtual NUMA configuration with positional arguments. The
-nth B<VNODE_SPEC> in the list specifies the configuration of the nth
-virtual node.
-
-Note that virtual NUMA for PV guests is not yet supported, because
-there is an issue with cpuid handling that affects PV virtual NUMA.
-Furthermore, guests with virtual NUMA cannot be saved or migrated
-because the migration stream does not preserve node information.
-
-Each B<VNODE_SPEC> is a list, which has a form of
-"[VNODE_CONFIG_OPTION,VNODE_CONFIG_OPTION, ... ]" (without quotes).
-
-For example:
-
-    vnuma = [ ["pnode=0","size=512","vcpus=0-4","vdistances=10,20"] ]
-
-means vnode 0 is mapped to pnode 0, has 512MB ram, has vcpus 0 to 4, the
-distance to itself is 10 and the distance to vnode 1 is 20.
-
-Each B<VNODE_CONFIG_OPTION> is a quoted key=value pair. Supported
-B<VNODE_CONFIG_OPTION>s are (they are all mandatory at the moment):
-
-=over 4
-
-=item B<pnode=NUMBER>
-
-Specify which physical node this virtual node maps to.
-
-=item B<size=MBYTES>
-
-Specify the size of this virtual node. The sum of memory size of all
-vnodes will become B<maxmem=>. If B<maxmem=> is specified separately,
-a check is performed to make sure the sum of all vnode memory matches
-B<maxmem=>.
-
-=item B<vcpus=CPU-STRING>
-
-Specify which vcpus belong to this node. B<CPU-STRING> is a
-comma-separated string of single vcpus and/or ranges. For example,
-"vcpus=0-5,8" specifies vcpus 0 to 5 and vcpu 8.
-
-=item B<vdistances=NUMBER, NUMBER, ... >
-
-Specify virtual distance from this node to all nodes (including
-itself) with positional arguments. For example, "vdistances=10,20"
-for vnode 0 means the distance from vnode 0 to vnode 0 is 10, from
-vnode 0 to vnode 1 is 20. The number of arguments supplied must match
-the total number of vnodes.
-
-Normally you can use the values from "xl info -n" or "numactl
---hardware" to fill in vdistance list.
-
-=back
-
-=back
-
-=head3 Event Actions
-
-=over 4
-
-=item B<on_poweroff="ACTION">
-
-Specifies what should be done with the domain if it shuts itself down.
-The C<ACTION>s are:
-
-=over 4
-
-=item B<destroy>
-
-destroy the domain
-
-=item B<restart>
-
-destroy the domain and immediately create a new domain with the same
-configuration
-
-=item B<rename-restart>
-
-rename the domain which terminated, and then immediately create a new
-domain with the same configuration as the original
-
-=item B<preserve>
-
-keep the domain. It can be examined, and later destroyed with C<xl
-destroy>.
-
-=item B<coredump-destroy>
-
-write a "coredump" of the domain to F</var/lib/xen/dump/NAME> and then
-destroy the domain.
-
-=item B<coredump-restart>
-
-write a "coredump" of the domain to F</var/lib/xen/dump/NAME> and then
-restart the domain.
-
-=item B<soft-reset>
-
-Reset all Xen specific interfaces for the Xen-aware HVM domain,
-allowing it to reestablish these interfaces and continue executing the
-domain. PV and non-Xen-aware HVM guests are not supported.
-
-=back
-
-The default for C<on_poweroff> is C<destroy>.
-
-=item B<on_reboot="ACTION">
-
-Action to take if the domain shuts down with a reason code requesting
-a reboot. Default is C<restart>.
-
-=item B<on_watchdog="ACTION">
-
-Action to take if the domain shuts down due to a Xen watchdog timeout.
-Default is C<destroy>.
-
-=item B<on_crash="ACTION">
-
-Action to take if the domain crashes. Default is C<destroy>.
-
-=item B<on_soft_reset="ACTION">
-
-Action to take if the domain performs 'soft reset' (e.g. does kexec).
-Default is C<soft-reset>.
-
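-An illustrative combination of these actions:
-
-    on_poweroff = "destroy"
-    on_reboot   = "restart"
-    on_crash    = "coredump-restart"
-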
-=back
-
-=head3 Direct Kernel Boot
-
-Direct kernel boot allows booting directly from a kernel and initrd
-stored in the host physical machine OS, allowing command line arguments
-to be passed directly. PV guest direct kernel boot is supported. HVM
-guest direct kernel boot is supported with limitations: it works when
-using qemu-xen and the default BIOS 'seabios', but is not supported with
-stubdom-dm or the old rombios.
-
-=over 4
-
-=item B<kernel="PATHNAME">
-
-Load the specified file as the kernel image.
-
-=item B<ramdisk="PATHNAME">
-
-Load the specified file as the ramdisk.
-
-=item B<cmdline="STRING">
-
-Append B<cmdline="STRING"> to the kernel command line. (Note: it is
-guest specific what meaning this has). It can replace B<root="STRING">
-plus B<extra="STRING"> and is preferred. When B<cmdline="STRING"> is set,
-B<root="STRING"> and B<extra="STRING"> will be ignored.
-
-=item B<root="STRING">
-
-Append B<root="STRING"> to the kernel command line (Note: it is guest
-specific what meaning this has).
-
-=item B<extra="STRING">
-
-Append B<STRING> to the kernel command line. (Note: it is guest
-specific what meaning this has).
-
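-A direct kernel boot sketch (the paths are hypothetical):
-
-    kernel  = "/boot/guest-vmlinuz"
-    ramdisk = "/boot/guest-initrd.img"
-    cmdline = "root=/dev/xvda1 console=hvc0"
-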
-=back
-
-=head3 Other Options
-
-=over 4
-
-=item B<uuid="UUID">
-
-Specifies the UUID of the domain. If not specified, a fresh unique
-UUID will be generated.
-
-=item B<seclabel="LABEL">
-
-Assign an XSM security label to this domain.
-
-=item B<init_seclabel="LABEL">
-
-Specify an XSM security label used for this domain temporarily during
-its build. The domain's XSM label will be changed to the execution
-seclabel (specified by "seclabel") once the build is complete, prior to
-unpausing the domain. With a properly constructed security policy (such
-as nomigrate_t in the example policy), this can be used to build a
-domain whose memory is not accessible to the toolstack domain.
-
-=item B<nomigrate=BOOLEAN>
-
-Disable migration of this domain. This enables certain other features
-which are incompatible with migration. Currently this is limited to
-enabling the invariant TSC feature flag in cpuid results when TSC is
-not emulated.
-
-=item B<driver_domain=BOOLEAN>
-
-Specify that this domain is a driver domain. This enables certain
-features needed in order to run a driver domain.
-
-=item B<device_tree=PATH>
-
-Specify a partial device tree (compiled via the Device Tree Compiler).
-Everything under the node "/passthrough" will be copied into the guest
-device tree. For convenience, the node "/aliases" is also copied to allow
-the user to define aliases which can be used by the guest kernel.
-
-Given the complexity of verifying the validity of a device tree, this
-option should only be used with a trusted device tree.
-
-Note that the partial device tree should avoid using the phandle 65000,
-which is reserved by the toolstack.
-
-=back
-
-=head2 Devices
-
-The following options define the paravirtual, emulated and physical
-devices which the guest will contain.
-
-=over 4
-
-=item B<disk=[ "DISK_SPEC_STRING", "DISK_SPEC_STRING", ...]>
-
-Specifies the disks (both emulated disks and Xen virtual block
-devices) which are to be provided to the guest, and what objects on
-the host they should map to. See F<docs/misc/xl-disk-configuration.txt>.
-
-=item B<vif=[ "NET_SPEC_STRING", "NET_SPEC_STRING", ...]>
-
-Specifies the networking provision (both emulated network adapters,
-and Xen virtual interfaces) to be provided to the guest. See
-F<docs/misc/xl-network-configuration.markdown>.
-
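-Illustrative entries (device names and MAC address are hypothetical; see
-the documents above for the full syntax):
-
-    disk = [ '/dev/vg/guest-volume,raw,xvda,rw' ]
-    vif  = [ 'mac=00:16:3e:00:00:01,bridge=xenbr0' ]
-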
-=item B<vtpm=[ "VTPM_SPEC_STRING", "VTPM_SPEC_STRING", ...]>
-
-Specifies the virtual trusted platform module to be
-provided to the guest. Please see F<docs/misc/vtpm.txt>
-for more details.
-
-Each B<VTPM_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
-settings, from the following list:
-
-=over 4
-
-=item C<backend=DOMAIN>
-
-Specify the backend domain name or id. This value is required!
-If this domain is a guest, the backend should be set to the
-vtpm domain name. If this domain is a vtpm, the
-backend should be set to the vtpm manager domain name.
-
-=item C<uuid=UUID>
-
-Specify the uuid of this vtpm device. The uuid is used to uniquely
-identify the vtpm device. You can create one using the uuidgen
-program on unix systems. If left unspecified, a new uuid
-will be randomly generated every time the domain boots.
-If this is a vtpm domain, you should specify a value. The
-value is optional if this is a guest domain.
-
-=back
-
-=item B<vfb=[ "VFB_SPEC_STRING", "VFB_SPEC_STRING", ...]>
-
-Specifies the paravirtual framebuffer devices which should be supplied
-to the domain.
-
-This option does not control the emulated graphics card presented to
-an HVM guest. See L<Emulated VGA Graphics Device> below for how to
-configure the emulated device. If L<Emulated VGA Graphics Device> options
-are used in a PV guest configuration, xl will pick up B<vnc>, B<vnclisten>,
-B<vncpasswd>, B<vncdisplay>, B<vncunused>, B<sdl>, B<opengl> and
-B<keymap> to construct paravirtual framebuffer device for the guest.
-
-Each B<VFB_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
-settings, from the following list:
-
-=over 4
-
-=item C<vnc=BOOLEAN>
-
-Allow access to the display via the VNC protocol. This enables the
-other VNC-related settings. The default is to enable this.
-
-=item C<vnclisten="ADDRESS[:DISPLAYNUM]">
-
-Specifies the IP address, and optionally VNC display number, to use.
-
-NB that if you specify the display number here, you should not use
-vncdisplay.
-
-=item C<vncdisplay=DISPLAYNUM>
-
-Specifies the VNC display number to use. The actual TCP port number
-will be DISPLAYNUM+5900.
-
-NB that you should not use this option if you set the displaynum in the
-vnclisten string.
-
-=item C<vncunused=BOOLEAN>
-
-Requests that the VNC display setup search for a free TCP port to use.
-The actual display used can be accessed with C<xl vncviewer>.
-
-=item C<vncpasswd="PASSWORD">
-
-Specifies the password for the VNC server.
-
-=item C<sdl=BOOLEAN>
-
-Specifies that the display should be presented via an X window (using
-Simple DirectMedia Layer). The default is to not enable this mode.
-
-=item C<display=DISPLAY>
-
-Specifies the X Window display that should be used when the sdl option
-is used.
-
-=item C<xauthority=XAUTHORITY>
-
-Specifies the path to the X authority file that should be used to
-connect to the X server when the sdl option is used.
-
-=item C<opengl=BOOLEAN>
-
-Enable OpenGL acceleration of the SDL display. Only effects machines
-using C<device_model_version="qemu-xen-traditional"> and only if the
-device-model was compiled with OpenGL support. Disabled by default.
-
-=item C<keymap="LANG">
-
-Configure the keymap to use for the keyboard associated with this
-display. If the input method does not easily support raw keycodes
-(e.g. this is often the case when using VNC) then this allows us to
-correctly map the input keys into keycodes seen by the guest. The
-specific values which are accepted are defined by the version of the
-device-model which you are using. See L</"Keymaps"> below or consult the
-L<qemu(1)> manpage. The default is B<en-us>.
-
-=back
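-
-An illustrative entry (the address and keymap are examples):
-
-    vfb = [ "vnc=1,vnclisten=0.0.0.0,keymap=en-us" ]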
-
-=item B<channel=[ "CHANNEL_SPEC_STRING", "CHANNEL_SPEC_STRING", ...]>
-
-Specifies the virtual channels to be provided to the guest. A
-channel is a low-bandwidth, bidirectional byte stream, which resembles
-a serial link. Typical uses for channels include transmitting VM
-configuration after boot and signalling to in-guest agents. Please see
-F<docs/misc/channels.txt> for more details.
-
-Each B<CHANNEL_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
-settings. Leading and trailing whitespace is ignored in both KEY and
-VALUE. Neither KEY nor VALUE may contain ',', '=' or '"'. Defined values
-are:
-
-=over 4
-
-=item C<backend=DOMAIN>
-
-Specify the backend domain name or id. This parameter is optional. If
-this parameter is omitted then the toolstack domain will be assumed.
-
-=item C<name=NAME>
-
-Specify the string name for this device. This parameter is mandatory.
-This should be a well-known name for the specific application (e.g.
-guest agent) and should be used by the frontend to connect the
-application to the right channel device. There is no formal registry
-of channel names, so application authors are encouraged to make their
-names unique by including domain name and version number in the string
-(e.g. org.mydomain.guestagent.1).
-
-=item C<connection=CONNECTION>
-
-Specify how the backend will be implemented. The following options are
-available:
-
-=over 4
-
-=item B<connection=SOCKET>
-
-The backend will bind a Unix domain socket (at the path given by
-B<path=PATH>), call listen and accept connections. The backend will proxy
-data between the channel and the connected socket.
-
-=item B<connection=PTY>
-
-The backend will create a pty and proxy data between the channel and the
-master device. The command B<xl channel-list> can be used to discover the
-assigned slave device.
-
-=back
-
-=back
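-
-An illustrative channel (the path and name are hypothetical):
-
-    channel = [ "connection=socket,path=/var/run/xen/guest-agent,name=org.example.guestagent.1" ]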
-
-=item B<rdm="RDM_RESERVATION_STRING">
-
-(HVM/x86 only) Specifies information about Reserved Device Memory (RDM),
-which is necessary to enable robust device passthrough. One example of RDM
-is reported through ACPI Reserved Memory Region Reporting (RMRR) structure
-on x86 platform.
-
-B<RDM_RESERVATION_STRING> has the form C<KEY=VALUE,KEY=VALUE,...> where:
-
-=over 4
-
-=item B<KEY=VALUE>
-
-Possible B<KEY>s are:
-
-=over 4
-
-=item B<strategy="STRING">
-
-Currently there is only one valid type:
-
-"host" means all reserved device memory on this platform should be checked to
-reserve regions in this VM's guest address space. This global rdm parameter
-allows the user to specify reserved regions explicitly; using "host" includes
-all reserved regions reported on this platform, which is useful when doing
-hotplug.
-
-By default this isn't set, so not all rdms are checked. Instead, only the
-rdm specific to a given device is checked when you assign that kind of
-device. Note this option is not recommended unless you can make sure no
-conflict exists.
-
-For example, suppose you're trying to set "memory = 2800" to allocate
-memory to one given VM but the platform owns two RDM regions like:
-
-    Device A [sbdf_A]: RMRR region_A: base_addr ac6d3000 end_address ac6e6fff
-    Device B [sbdf_B]: RMRR region_B: base_addr ad800000 end_address afffffff
-
-In this conflict case,
-
-#1. If B<strategy> is set to "host", for example,
-
-    rdm = "strategy=host,policy=strict" or rdm = "strategy=host,policy=relaxed"
-
-then all conflicts will be handled according to the policy
-introduced by B<policy> as described below.
-
-#2. If B<strategy> is not set at all, but
-
-    pci = [ 'sbdf_A, rdm_policy=xxxxx' ]
-
-then only the conflict with region_A will be handled according to the policy
-introduced by B<rdm_policy="STRING"> as described inside the pci options.
-
-=item B<policy="STRING">
-
-Specifies how to deal with conflicts when reserving reserved device
-memory in guest address space.
-
-When such a conflict cannot be resolved,
-
-"strict" means the VM can't be created, or the associated device can't be
-attached in the case of hotplug.
-
-"relaxed" allows the VM to be created but may cause the VM to crash if a
-pass-through device accesses RDM. For example, the Windows IGD GFX driver
-always accesses RDM regions, so a conflict leads to a VM crash.
-
-Note this may be overridden by rdm_policy option in PCI device configuration.
-
-=back
-
-=back
-
-=item B<usbctrl=[ "USBCTRL_SPEC_STRING", "USBCTRL_SPEC_STRING", ... ]>
-
-Specifies the USB controllers created for this guest. Each
-B<USBCTRL_SPEC_STRING> has the form C<KEY=VALUE,KEY=VALUE,...> where:
-
-=over 4
-
-=item B<KEY=VALUE>
-
-Possible B<KEY>s are:
-
-=over 4
-
-=item B<type=TYPE>
-
-Specifies the usb controller type.
-
-"pv" denotes a kernel based pvusb backend.
-
-"qusb" specifies a qemu base backend for pvusb.
-
-"auto" (the default) determines whether a kernel based backend is installed.
-If this is the case, "pv" is selected, "qusb" will be selected if no kernel
-backend is currently available.
-
-=item B<version=VERSION>
-
-Specifies the usb controller version. Possible values include
-1 (USB1.1) and 2 (USB2.0). Default is 2 (USB2.0).
-
-=item B<ports=PORTS>
-
-Specifies the total number of ports of the usb controller. The maximum
-number is 31. Default is 8.
-
-USB controller ids start from 0. In line with the USB spec, however,
-ports on a controller start from 1.
-
-E.g. with
-
-    usbctrl = [ "version=1,ports=4", "version=2,ports=8" ]
-
-the first controller has id 0 and ports 1,2,3,4, and the second
-controller has id 1 and ports 1,2,3,4,5,6,7,8.
-
-=back
-
-=back
-
-=item B<usbdev=[ "USB_SPEC_STRING", "USB_SPEC_STRING", ... ]>
-
-Specifies the USB devices to be attached to the guest at boot. Each
-B<USB_SPEC_STRING> has the form C<KEY=VALUE,KEY=VALUE,...> where:
-
-=over 4
-
-=item B<KEY=VALUE>
-
-Possible B<KEY>s are:
-
-=over 4
-
-=item B<devtype=hostdev>
-
-Specifies the USB device type. Currently only 'hostdev' is supported.
-
-=item B<hostbus=busnum>
-
-Specifies the busnum of the USB device from the host perspective.
-
-=item B<hostaddr=devnum>
-
-Specifies the devnum of the USB device from the host perspective.
-
-=item B<controller=CONTROLLER>
-
-Specifies the id of the USB controller to which the USB device is attached.
-
-=item B<port=PORT>
-
-Specifies the port to which the USB device is attached. B<port=PORT>
-is valid only when B<controller=CONTROLLER> is specified.
-
-=back
-
-If no controller is specified, an available controller:port combination
-will be used. If there are no available controller:port options,
-a new controller will be created.
-
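-For example (bus and device numbers are illustrative):
-
-    usbdev = [ "hostbus=1,hostaddr=3" ]
-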
-=back
-
-=item B<pci=[ "PCI_SPEC_STRING", "PCI_SPEC_STRING", ... ]>
-
-Specifies the host PCI devices to pass through to this guest. Each B<PCI_SPEC_STRING>
-has the form C<[DDDD:]BB:DD.F[@VSLOT],KEY=VALUE,KEY=VALUE,...> where:
-
-=over 4
-
-=item B<DDDD:BB:DD.F>
-
-Identifies the PCI device from the host perspective in domain
-(B<DDDD>), Bus (B<BB>), Device (B<DD>) and Function (B<F>) syntax. This is
-the same scheme as used in the output of C<lspci> for the device in
-question. Note: By default C<lspci> will omit the domain (B<DDDD>) if it
-is zero and it is optional here also. You may specify the function
-(B<F>) as B<*> to indicate all functions.
-
-=item B<@VSLOT>
-
-Specifies the virtual device where the guest will see this
-device. This is equivalent to the B<DD> which the guest sees. In a
-guest B<DDDD> and B<BB> are C<0000:00>.
-
-=item B<KEY=VALUE>
-
-Possible B<KEY>s are:
-
-=over 4
-
-=item B<permissive=BOOLEAN>
-
-By default pciback only allows PV guests to write "known safe" values
-into PCI config space; likewise QEMU (both qemu-xen and
-qemu-traditional) imposes the same constraint on HVM guests. However
-many devices require writes to other areas of config space in order to
-operate properly. This option tells the backend (pciback or QEMU) to
-allow all writes to PCI config space of this device by this domain.
-
-This option should be enabled with caution: it gives the guest much
-more control over the device, which may have security or stability
-implications. It is recommended to enable this option only for
-trusted VMs under administrator control.
-
-=item B<msitranslate=BOOLEAN>
-
-Specifies that MSI-INTx translation should be turned on for the PCI
-device. When enabled, MSI-INTx translation will always enable MSI on
-the PCI device regardless of whether the guest uses INTx or MSI. Some
-device drivers, such as NVIDIA's, detect an inconsistency and do not
-function when this option is enabled. Therefore the default is false (0).
-
-=item B<seize=BOOLEAN>
-
-Tells xl to automatically attempt to re-assign a device to
-pciback if it is not already assigned.
-
-WARNING: If you set this option, xl will gladly re-assign a critical
-system device, such as a network or a disk controller being used by
-dom0, without confirmation. Please use with care.
-
-=item B<power_mgmt=BOOLEAN>
-
-(HVM only) Specifies that the VM should be able to program the
-D0-D3hot power management states for the PCI device. False (0) by
-default.
-
-=item B<rdm_policy="STRING">
-
-(HVM/x86 only) This is the same as the B<policy> option inside the B<rdm>
-option, but specific to a given device. The default is "relaxed", the
-same as the B<policy> option.
-
-Note this would override global B<rdm> option.
-
-=back
-
-=back
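-
-An illustrative assignment (the BDF address is hypothetical):
-
-    pci = [ '01:00.0,permissive=1' ]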
-
-=item B<pci_permissive=BOOLEAN>
-
-Changes the default value of 'permissive' for all PCI devices passed
-through to this VM. See L<permissive|/"permissive_boolean"> above.
-
-=item B<pci_msitranslate=BOOLEAN>
-
-Changes the default value of 'msitranslate' for all PCI devices passed
-through to this VM. See L<msitranslate|/"msitranslate_boolean"> above.
-
-=item B<pci_seize=BOOLEAN>
-
-Changes the default value of 'seize' for all PCI devices passed
-through to this VM. See L<seize|/"seize_boolean"> above.
-
-=item B<pci_power_mgmt=BOOLEAN>
-
-(HVM only) Changes the default value of 'power_mgmt' for all PCI
-devices passed through to this VM. See L<power_mgmt|/"power_mgmt_boolean">
-above.
-
-=item B<gfx_passthru=BOOLEAN|"STRING">
-
-Enable graphics device PCI passthrough. This option makes an assigned
-PCI graphics card become the primary graphics card in the VM. The QEMU
-emulated graphics adapter is disabled and the VNC console for the VM
-will not have any graphics output. All graphics output, including boot
-time QEMU BIOS messages from the VM, will go to the physical outputs
-of the passed-through physical graphics card.
-
-The graphics card PCI device to pass through is chosen with the B<pci>
-option, exactly in the same way as normal Xen PCI device
-passthrough/assignment is done. Note that gfx_passthru does not do
-any kind of sharing of the GPU, so you can only assign the GPU to one
-single VM at a time.
-
-gfx_passthru also enables various legacy VGA memory ranges, BARs, MMIOs,
-and ioports to be passed through to the VM, since those are required
-for correct operation of things like VGA BIOS, text mode, VBE, etc.
-
-Enabling gfx_passthru option also copies the physical graphics card
-video BIOS to the guest memory, and executes the VBIOS in the guest
-to initialize the graphics card.
-
-Most graphics adapters require vendor specific tweaks for properly
-working graphics passthrough. See the XenVGAPassthroughTestedAdapters
-L<http://wiki.xen.org/wiki/XenVGAPassthroughTestedAdapters> wiki page
-for currently supported graphics cards for gfx_passthru.
-
-gfx_passthru is currently supported both with the qemu-xen-traditional
-device-model and upstream qemu-xen device-model.
-
-When given as a boolean the B<gfx_passthru> option either disables gfx
-passthru or enables autodetection.
-
-But when given as a string the B<gfx_passthru> option describes the type
-of device to enable. Note this behavior is only supported with the upstream
-qemu-xen device-model. With qemu-xen-traditional IGD is always assumed
-and options other than autodetect or explicit IGD will result in an error.
-
-Currently, valid options are:
-
-=over 4
-
-=item B<gfx_passthru=0>
-
-Disables graphics device PCI passthrough.
-
-=item B<gfx_passthru=1>, B<gfx_passthru="default">
-
-Enables graphics device PCI passthrough and autodetects the type of device
-which is being used.
-
-=item "igd"
-
-Enables graphics device PCI passthrough but forcing the type of device to
-Intel Graphics Device.
-
-=back
-
-Note that some graphics adapters (AMD/ATI cards, for example) do not
-necessarily require gfx_passthru option, so you can use the normal Xen
-PCI passthrough to assign the graphics card as a secondary graphics
-card to the VM. The QEMU-emulated graphics card remains the primary
-graphics card, and VNC output is available from the QEMU-emulated
-primary adapter.
-
-More information about Xen gfx_passthru feature is available
-on the XenVGAPassthrough L<http://wiki.xen.org/wiki/XenVGAPassthrough>
-wiki page.
-
-=item B<rdm_mem_boundary=MBYTES>
-
-Number of megabytes to use as the boundary for checking rdm conflicts.
-
-When RDM conflicts with RAM, the RDM regions are probably scattered over
-the whole RAM space. Multiple RDM entries would worsen this and lead to a
-complicated memory layout. So here we're trying a simple solution to
-avoid breaking the existing layout. When a conflict occurs:
-
- #1. Above a predefined boundary
-     - move lowmem_end below the reserved region to solve the conflict;
-
- #2. Below a predefined boundary
-     - Check the strict/relaxed policy.
-     The "strict" policy leads to a failure in libxl. Note when both
-     policies are specified on a given region, 'strict' is always
-     preferred.
-     The "relaxed" policy issues a warning message and also marks this
-     entry INVALID to indicate we shouldn't expose this entry to
-     hvmloader.
-
-The default is 2G (2048).
-
-=item B<dtdev=[ "DTDEV_PATH", "DTDEV_PATH", ... ]>
-
-Specifies the host device tree nodes to passthrough to this guest. Each
-DTDEV_PATH is the absolute path in the device tree.
-
-=item B<ioports=[ "IOPORT_RANGE", "IOPORT_RANGE", ... ]>
-
-Allow guest to access specific legacy I/O ports. Each B<IOPORT_RANGE>
-is given in hexadecimal and may be either a span, e.g. C<2f8-2ff>
-(inclusive), or a single I/O port, e.g. C<2f8>.
-
-It is recommended to use this option only for trusted VMs under
-administrator control.
-
-=item B<iomem=[ "IOMEM_START,NUM_PAGES[@GFN]", "IOMEM_START,NUM_PAGES[@GFN]", ... ]>
-
-Allow auto-translated domains to access specific hardware I/O memory pages.
-
-B<IOMEM_START> is a physical page number. B<NUM_PAGES> is the number of pages,
-beginning with B<IOMEM_START>, to allow access to. B<GFN> specifies the guest frame
-number where the mapping will start in the domU's address space. If B<GFN> is
-not given, the mapping will be performed using B<IOMEM_START> as a start in the
-domU's address space, therefore performing a 1:1 mapping by default.
-All of these values must be given in hexadecimal.
-
-Note that the IOMMU won't be updated with the mappings specified with this
-option. This option therefore should not be used to passthrough any
-IOMMU-protected device.
-
-It is recommended to use this option only for trusted VMs under
-administrator control.
-
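-For example (addresses are illustrative), a default 1:1 mapping of 0x10
-pages starting at physical page 0x80000:
-
-    iomem = [ "80000,10" ]
-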
-=item B<irqs=[ NUMBER, NUMBER, ... ]>
-
-Allow a guest to access specific physical IRQs.
-
-It is recommended to use this option only for trusted VMs under
-administrator control.
-
-=item B<max_event_channels=N>
-
-Limit the guest to using at most N event channels (PV interrupts).
-Guests use hypervisor resources for each event channel they use.
-
-The default of 1023 should be sufficient for typical guests. The
-maximum value depends on what the guest supports. Guests supporting the
-FIFO-based event channel ABI support up to 131,071 event channels.
-Other guests are limited to 4095 (64-bit x86 and ARM) or 1023 (32-bit
-x86).
-
-=back
-
-=head2 Paravirtualised (PV) Guest Specific Options
-
-The following options apply only to Paravirtual guests.
-
-=over 4
-
-=item B<bootloader="PROGRAM">
-
-Run C<PROGRAM> to find the kernel image and ramdisk to use. Normally
-C<PROGRAM> would be C<pygrub>, which is an emulation of
-grub/grub2/syslinux. Either B<kernel> or B<bootloader> must be specified
-for PV guests.
-
-=item B<bootloader_args=[ "ARG", "ARG", ...]>
-
-Append B<ARG>s to the arguments to the B<bootloader>
-program. Alternatively if the argument is a simple string then it will
-be split into words at whitespace (this second option is deprecated).
-
-=item B<e820_host=BOOLEAN>
-
-Selects whether to expose the host e820 (memory map) to the guest via
-the virtual e820. When this option is false (0) the guest pseudo-physical
-address space consists of a single contiguous RAM region. When this
-option is specified the virtual e820 instead reflects the host e820
-and contains the same PCI holes. The total amount of RAM represented
-by the memory map is always the same; this option configures only how
-it is laid out.
-
-Exposing the host e820 to the guest gives the guest kernel the
-opportunity to set aside the required part of its pseudo-physical
-address space in order to provide address space to map passedthrough
-PCI devices. It is guest Operating System dependent whether this
-option is required, specifically it is required when using a mainline
-Linux ("pvops") kernel. This option defaults to true (1) if any PCI
-passthrough devices are configured and false (0) otherwise. If you do not
-configure any passthrough devices at domain creation time but expect
-to hotplug devices later then you should set this option. Conversely
-if your particular guest kernel does not require this behaviour then
-it is safe to allow this to be enabled but you may wish to disable it
-anyway.
-
-=item B<pvh=BOOLEAN>
-
-Selects whether to run this PV guest in an HVM container. Default is 0.
-
-=back
-
-=head2 Fully-virtualised (HVM) Guest Specific Options
-
-The following options apply only to HVM guests.
-
-=head3 Boot Device
-
-=over 4
-
-=item B<boot=[c|d|n]>
-
-Selects the emulated virtual device to boot from. Options are hard
-disk (B<c>), cd-rom (B<d>) or network/PXE (B<n>). Multiple options can be
-given and will be attempted in the order they are given. e.g. to boot
-from cd-rom but fall back to the hard disk you can give B<dc>. The
-default is B<cd>.
-
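-For example, to try the cd-rom first and fall back to the hard disk:
-
-    boot = "dc"
-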
-=back
-
-=head3 Emulated disk controller type
-
-=over 4
-
-=item B<hdtype="STRING">
-
-Select the hard disk type (ide|ahci).
-hdtype=ahci adds an ich9 disk controller in AHCI mode and uses it with
-upstream qemu to emulate disks instead of IDE. This decreases boot time
-but may not be supported by default in Windows XP and older Windows
-versions. The default is ide.
-
-=back
-
-=head3 Paging
-
-The following options control the mechanisms used to virtualise guest
-memory. The defaults are selected to give the best results for the
-common case and so you should normally leave these options
-unspecified.
-
-=over 4
-
-=item B<hap=BOOLEAN>
-
-Turns "hardware assisted paging" (the use of the hardware nested page
-table feature) on or off. This feature is called EPT (Extended Page
-Tables) by Intel and NPT (Nested Page Tables) or RVI (Rapid
-Virtualisation Indexing) by AMD. Affects HVM guests only. If turned
-off, Xen will run the guest in "shadow page table" mode where the
-guest's page table updates and/or TLB flushes etc. will be emulated.
-Use of HAP is the default when available.
-
-=item B<oos=BOOLEAN>
-
-Turns "out of sync pagetables" on or off. When running in shadow page
-table mode, the guest's page table updates may be deferred as
-specified in the Intel/AMD architecture manuals. However this may
-expose unexpected bugs in the guest, or find bugs in Xen, so it is
-possible to disable this feature. Use of out of sync page tables,
-when Xen thinks it appropriate, is the default.
-
-=item B<shadow_memory=MBYTES>
-
-Number of megabytes to set aside for shadowing guest pagetable pages
-(effectively acting as a cache of translated pages) or to use for HAP
-state. By default this is 1MB per guest vcpu plus 8KB per MB of guest
-RAM. You should not normally need to adjust this value. However if you
-are not using hardware assisted paging (i.e. you are using shadow
-mode) and your guest workload consists of a very large number of
-similar processes then increasing this value may improve performance.
-
-=back
-
-=head3 Processor and Platform Features
-
-The following options allow various processor and platform level
-features to be hidden or exposed from the guest's point of view. This
-can be useful when running older guest Operating Systems which may
-misbehave when faced with more modern features. In general you should
-accept the defaults for these options wherever possible.
-
-=over 4
-
-=item B<bios="STRING">
-
-Select the virtual firmware that is exposed to the guest.
-By default, a guess is made based on the device model, but sometimes
-it may be useful to request a different one, like UEFI.
-
-=over 4
-
-=item B<rombios>
-
-Loads ROMBIOS, a 16-bit x86 compatible BIOS. This is used by default
-when device_model_version=qemu-xen-traditional. This is the only BIOS
-option supported when device_model_version=qemu-xen-traditional. This is
-the BIOS used by all previous Xen versions.
-
-=item B<seabios>
-
-Loads SeaBIOS, a 16-bit x86 compatible BIOS. This is used by default
-with device_model_version=qemu-xen.
-
-=item B<ovmf>
-
-Loads OVMF, a standard UEFI firmware by Tianocore project.
-Requires device_model_version=qemu-xen.
-
-=back
-
-=item B<pae=BOOLEAN>
-
-Hide or expose the IA32 Physical Address Extensions. These extensions
-make it possible for a 32 bit guest Operating System to access more
-than 4GB of RAM. Enabling PAE also enables other features such as
-NX. PAE is required if you wish to run a 64-bit guest Operating
-System. In general you should leave this enabled and allow the guest
-Operating System to choose whether or not to use PAE. (X86 only)
-
-=item B<acpi=BOOLEAN>
-
-Expose ACPI (Advanced Configuration and Power Interface) tables from
-the virtual firmware to the guest Operating System. ACPI is required
-by most modern guest Operating Systems. This option is enabled by
-default and usually you should omit it. However it may be necessary to
-disable ACPI for compatibility with some guest Operating Systems.
-
-=item B<acpi_s3=BOOLEAN>
-
-Include the S3 (suspend-to-ram) power state in the virtual firmware
-ACPI table. True (1) by default.
-
-=item B<acpi_s4=BOOLEAN>
-
-Include S4 (suspend-to-disk) power state in the virtual firmware ACPI
-table. True (1) by default.
-
-=item B<apic=BOOLEAN>
-
-Include information regarding APIC (Advanced Programmable Interrupt
-Controller) in the firmware/BIOS tables on a single processor
-guest. This causes the MP (multiprocessor) and PIR (PCI Interrupt
-Routing) tables to be exported by the virtual firmware. This option
-has no effect on a guest with multiple virtual CPUs as they must
-always include these tables. This option is enabled by default and you
-should usually omit it but it may be necessary to disable these
-firmware tables when using certain older guest Operating
-Systems. These tables have been superseded by newer constructs within
-the ACPI tables. (X86 only)
-
-=item B<nx=BOOLEAN>
-
-Hides or exposes the No-eXecute capability. This allows a guest
-Operating system to map pages such that they cannot be executed which
-can enhance security. This option requires that PAE also be
-enabled. (X86 only)
-
-=item B<hpet=BOOLEAN>
-
-Enables or disables HPET (High Precision Event Timer). This option is
-enabled by default and you should usually omit it. It may be necessary
-to disable the HPET in order to improve compatibility with guest
-Operating Systems. (X86 only)
-
-=item B<altp2mhvm=BOOLEAN>
-
-Enables or disables hvm guest access to alternate-p2m capability.
-Alternate-p2m allows a guest to manage multiple p2m guest physical
-"memory views" (as opposed to a single p2m). This option is
-disabled by default and is available only to hvm domains.
-You may want this option if you want to access-control/isolate
-access to specific guest physical memory pages accessed by
-the guest, e.g. for HVM domain memory introspection or
-for isolation/access-control of memory between components within
-a single guest hvm domain.
-
-=item B<nestedhvm=BOOLEAN>
-
-Enables or disables guest access to hardware virtualisation features,
-e.g. it allows a guest Operating System to also function as a
-hypervisor. This option is disabled by default. You may want this
-option if you want to run another hypervisor (including another copy
-of Xen) within a Xen guest or to support a guest Operating System
-which uses hardware virtualisation extensions (e.g. Windows XP
-compatibility mode on more modern Windows OS).
-
-=item B<cpuid="LIBXL_STRING"> or B<cpuid=[ "XEND_STRING", "XEND_STRING" ]>
-
-Configure the value returned when a guest executes CPUID instruction.
-Two versions of config syntax are recognized: libxl and xend.
-
-The libxl syntax is a comma separated list of key=value pairs, preceded by the
-word "host". A few keys take a numerical value, all others take a single
-character which describes what to do with the feature bit.
-
-Possible values for a single feature bit:
- '1' -> force the corresponding bit to 1
- '0' -> force to 0
- 'x' -> Get a safe value (pass through and mask with the default policy)
- 'k' -> pass through the host bit value
- 's' -> as 'k' but preserve across save/restore and migration (not implemented)
-
-Note: when specifying B<cpuid> for hypervisor leaves (0x4000xxxx major group)
-only the lowest 8 bits of the leaf's 0x4000xx00 EAX register are processed; the
-rest are ignored (these 8 bits signify the maximum number of hypervisor leaves).
-
-List of keys taking a value:
-apicidsize brandid clflush family localapicid maxleaf maxhvleaf model nc
-proccount procpkg stepping
-
-List of keys taking a character:
-3dnow 3dnowext 3dnowprefetch abm acpi aes altmovcr8 apic avx clfsh cmov
-cmplegacy cmpxchg16 cmpxchg8 cntxid dca de ds dscpl dtes64 est extapic f16c
-ffxsr fma4 fpu fxsr htt hypervisor ia64 ibs lahfsahf lm lwp mca mce misalignsse
-mmx mmxext monitor movbe msr mtrr nodeid nx osvw osxsave pae page1gb pat pbe
-pclmulqdq pdcm pge popcnt pse pse36 psn rdtscp skinit smx ss sse sse2 sse3
-sse4_1 sse4_2 sse4a ssse3 svm svm_decode svm_lbrv svm_npt svm_nrips
-svm_pausefilt svm_tscrate svm_vmcbclean syscall sysenter tbm tm tm2 topoext tsc
-vme vmx wdt x2apic xop xsave xtpr
-
-The xend syntax is a list of values in the form of
-'leafnum:register=bitstring,register=bitstring'
- "leafnum" is the requested function,
- "register" is the response register to modify
- "bitstring" represents all bits in the register, its length must be 32 chars.
- Each successive character represent a lesser-significant bit, possible values
- are listed above in the libxl section.
-
-Example to hide two features from the guest: 'tm', which is bit #29 in EDX, and
-'pni' (SSE3), which is bit #0 in ECX:
-
-xend: [ '1:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0,edx=xx0xxxxxxxxxxxxxxxxxxxxxxxxxxxxx' ]
-
-libxl: 'host,tm=0,sse3=0'
-
-More info about the CPUID instruction can be found in the processor manuals, and
-in Wikipedia: L<http://en.wikipedia.org/wiki/CPUID>
-
-=item B<acpi_firmware="STRING">
-
-Specify a path to a file that contains extra ACPI firmware tables to pass in to
-a guest. The file can contain several tables in their binary AML form
-concatenated together. Each table self describes its length so no additional
-information is needed. These tables will be added to the ACPI table set in the
-guest. Note that existing tables cannot be overridden by this feature. For
-example this cannot be used to override tables like DSDT, FADT, etc.
-
-=item B<smbios_firmware="STRING">
-
-Specify a path to a file that contains extra SMBIOS firmware structures to pass
-in to a guest. The file can contain a set of DMTF predefined structures which will
-override the internal defaults. Not all predefined structures can be overridden,
-only the following types: 0, 1, 2, 3, 11, 22, 39. The file can also contain any
-number of vendor defined SMBIOS structures (type 128 - 255). Since SMBIOS
-structures do not present their overall size, each entry in the file must be
-preceded by a 32-bit integer indicating the size of the next structure.
-
-=item B<ms_vm_genid="OPTION">
-
-Provide a VM generation ID to the guest.
-
-The VM generation ID is a 128-bit random number that a guest may use
-to determine if the guest has been restored from an earlier snapshot
-or cloned.
-
-This is required for Microsoft Windows Server 2012 (and later) domain
-controllers.
-
-Valid options are:
-
-=over 4
-
-=item B<"generate">
-
-Generate a random VM generation ID every time the domain is created or
-restored.
-
-=item B<"none">
-
-Do not provide a VM generation ID.
-
-=back
-
-See also "Virtual Machine Generation ID" by Microsoft
-(http://www.microsoft.com/en-us/download/details.aspx?id=30707).
-
-=back
-
-=head3 Guest Virtual Time Controls
-
-=over 4
-
-=item B<tsc_mode="MODE">
-
-Specifies how the TSC (Time Stamp Counter) should be provided to the
-guest (X86 only). Specifying this option as a number is
-deprecated. Options are:
-
-=over 4
-
-=item B<"default">
-
-Guest rdtsc/p is executed natively when monotonicity can be guaranteed
-and emulated otherwise (with frequency scaled if necessary).
-
-If an HVM container in B<default> TSC mode is created on a host that
-provides constant host TSC, its guest TSC frequency will be the same
-as the host. If it is later migrated to another host that provides
-constant host TSC and supports Intel VMX TSC scaling/AMD SVM TSC
-ratio, its guest TSC frequency will be the same before and after
-migration, and guest rdtsc/p will be executed natively as well after
-migration.
-
-=item B<"always_emulate">
-
-Guest rdtsc/p is always emulated and the virtual TSC will appear to
-increment (for both kernel and user accesses) at a fixed 1GHz rate,
-regardless of the PCPU HZ rate or power state. Although there is an
-overhead associated with emulation, this will NOT affect underlying
-CPU performance.
-
-=item B<"native">
-
-Guest rdtsc always executed natively (no monotonicity/frequency
-guarantees); guest rdtscp emulated at native frequency if unsupported
-by h/w, else executed natively.
-
-=item B<"native_paravirt">
-
-Same as B<native>, except Xen manages the TSC_AUX register so the guest
-can determine when a restore/migration has occurred, and it is assumed
-the guest obtains/uses a pvclock-like mechanism to adjust for
-monotonicity and frequency changes.
-
-If an HVM container in B<native_paravirt> TSC mode can execute both guest
-rdtsc and guest rdtscp natively, then the guest TSC frequency will be
-determined in a similar way to that of B<default> TSC mode.
-
-=back
-
-Please see F<docs/misc/tscmode.txt> for more information on this option.
-
-=item B<localtime=BOOLEAN>
-
-Set the real time clock to local time or to UTC. False (0) by default,
-i.e. set to UTC.
-
-=item B<rtc_timeoffset=SECONDS>
-
-Set the real time clock offset in seconds. No offset (0) by default.
-
-=item B<vpt_align=BOOLEAN>
-
-Specifies that periodic Virtual Platform Timers should be aligned to
-reduce guest interrupts. Enabling this option can reduce power
-consumption, especially when a guest uses a high timer interrupt
-frequency (HZ) value. The default is true (1).
-
-=item B<timer_mode=MODE>
-
-Specifies the mode for Virtual Timers. The valid values are as follows:
-
-=over 4
-
-=item B<"delay_for_missed_ticks">
-
-Delay for missed ticks. Do not advance a vcpu's time beyond the
-correct delivery time for interrupts that have been missed due to
-preemption. Deliver missed interrupts when the vcpu is rescheduled and
-advance the vcpu's virtual time stepwise for each one.
-
-=item B<"no_delay_for_missed_ticks">
-
-No delay for missed ticks. As above, missed interrupts are delivered,
-but guest time always tracks wallclock (i.e., real) time while doing
-so.
-
-=item B<"no_missed_ticks_pending">
-
-No missed interrupts are held pending. Instead, to ensure ticks are
-delivered at some non-zero rate, if we detect missed ticks then the
-internal tick alarm is not disabled if the VCPU is preempted during
-the next tick period.
-
-=item B<"one_missed_tick_pending">
-
-One missed tick pending. Missed interrupts are collapsed
-together and delivered as one 'late tick'. Guest time always tracks
-wallclock (i.e., real) time.
-
-=back
-
-=back
-
-=head3 Memory layout
-
-=over 4
-
-=item B<mmio_hole=MBYTES>
-
-Specifies the size (in megabytes) of the MMIO hole below 4GiB. Only
-valid for device_model_version = "qemu-xen".
-
-Cannot be smaller than 256. Cannot be larger than 3840.
-
-A known good large value is 3072.
-
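-For example:
-
-    mmio_hole = 3072
-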
-=back
-
-=head3 Support for Paravirtualisation of HVM Guests
-
-The following options allow Paravirtualised features (such as devices)
-to be exposed to the guest Operating System in an HVM guest.
-Utilising these features requires specific guest support but when
-available they will result in improved performance.
-
-=over 4
-
-=item B<xen_platform_pci=BOOLEAN>
-
-Enable or disable the Xen platform PCI device. The presence of this
-virtual device enables a guest Operating System (subject to the
-availability of suitable drivers) to make use of paravirtualisation
-features such as disk and network devices etc. Enabling these drivers
-improves performance and is strongly recommended when available. PV
-drivers are available for various Operating Systems including HVM
-Linux L<http://wiki.xen.org/wiki/XenLinuxPVonHVMdrivers> and Microsoft
-Windows L<http://wiki.xen.org/wiki/XenWindowsGplPv>.
-
-Setting B<xen_platform_pci=0> with the default device_model "qemu-xen"
-requires at least QEMU 1.6.
-
-=item B<viridian=[ "GROUP", "GROUP", ...]>
-
-The groups of Microsoft Hyper-V (AKA viridian) compatible enlightenments
-exposed to the guest. The following groups of enlightenments may be
-specified:
-
-=over 4
-
-=item B<base>
-
-This group incorporates the Hypercall MSRs, Virtual processor index MSR,
-and APIC access MSRs. These enlightenments can improve performance of
-Windows Vista and Windows Server 2008 onwards and setting this option
-for such guests is strongly recommended.
-This group is also a pre-requisite for all others. If it is disabled
-then it is an error to attempt to enable any other group.
-
-=item B<freq>
-
-This group incorporates the TSC and APIC frequency MSRs. These
-enlightenments can improve performance of Windows 7 and Windows
-Server 2008 R2 onwards.
-
-=item B<time_ref_count>
-
-This group incorporates the Partition Time Reference Counter MSR. This
-enlightenment can improve performance of Windows 8 and Windows
-Server 2012 onwards.
-
-=item B<reference_tsc>
-
-This set incorporates the Partition Reference TSC MSR. This
-enlightenment can improve performance of Windows 7 and Windows
-Server 2008 R2 onwards.
-
-=item B<hcall_remote_tlb_flush>
-
-This set incorporates use of hypercalls for remote TLB flushing.
-This enlightenment may improve performance of Windows guests running
-on hosts with higher levels of (physical) CPU contention.
-
-=item B<apic_assist>
-
-This set incorporates use of the APIC assist page to avoid EOI of
-the local APIC.
-This enlightenment may improve performance of guests that make use of
-per-vcpu event channel upcall vectors.
-Note that this enlightenment will have no effect if the guest is
-using APICv posted interrupts.
-
-=item B<defaults>
-
-This is a special value that enables the default set of groups, which
-is currently the B<base>, B<freq>, B<time_ref_count> and B<apic_assist>
-groups.
-
-=item B<all>
-
-This is a special value that enables all available groups.
-
-=back
-
-Groups can be disabled by prefixing the name with '!'. So, for example,
-to enable all groups except B<freq>, specify:
-
-=over 4
-
-B<viridian=[ "all", "!freq" ]>
-
-=back
-
-For details of the enlightenments see the latest version of Microsoft's
-Hypervisor Top-Level Functional Specification.
-
-The enlightenments should be harmless for other versions of Windows
-(although they will not give any benefit) and the majority of other
-non-Windows OSes.
-However it is known that they are incompatible with some other Operating
-Systems and in some circumstances can prevent Xen's own paravirtualisation
-interfaces for HVM guests from being used.
-
-The viridian option can be specified as a boolean. A value of true (1)
-is equivalent to the list [ "defaults" ], and a value of false (0) is
-equivalent to an empty list.
-
-=back
-
-=head3 Emulated VGA Graphics Device
-
-The following options control the features of the emulated graphics
-device. Many of these options behave similarly to the equivalent key
-in the B<VFB_SPEC_STRING> for configuring virtual frame buffer devices
-(see above).
-
-=over 4
-
-=item B<videoram=MBYTES>
-
-Sets the amount of RAM which the emulated video card will contain,
-which in turn limits the resolutions and bit depths which will be
-available.
-
-When using the qemu-xen-traditional device-model, the default as well as
-minimum amount of video RAM for stdvga is 8 MB, which is sufficient for e.g.
-1600x1200 at 32bpp. For the upstream qemu-xen device-model, the default and
-minimum is 16 MB.
-
-When using the emulated Cirrus graphics card (B<vga="cirrus">) and the
-qemu-xen-traditional device-model, the amount of video RAM is fixed at 4 MB,
-which is sufficient for 1024x768 at 32 bpp. For the upstream qemu-xen
-device-model, the default and minimum is 8 MB.
-
-For B<qxl> vga, both the default and minimum are 128MB.
-If B<videoram> is set to less than 128MB, an error will be triggered.
-
-=item B<stdvga=BOOLEAN>
-
-Select a standard VGA card with VBE (VESA BIOS Extensions) as the
-emulated graphics device. The default is false (0) which means to emulate
-a Cirrus Logic GD5446 VGA card. If your guest supports VBE 2.0 or
-later (e.g. Windows XP onwards) then you should enable this.
-stdvga supports more video ram and bigger resolutions than Cirrus.
-This option is deprecated, use vga="stdvga" instead.
-
-=item B<vga="STRING">
-
-Selects the emulated video card (none|stdvga|cirrus|qxl).
-The default is cirrus.
-
-In general, QXL should work with the Spice remote display protocol
-for acceleration, and a QXL driver is necessary in the guest in this
-case. QXL can also work with the VNC protocol, but it will behave like
-a standard VGA card without acceleration.
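-
-For example, to select the standard VGA card rather than the default
-Cirrus emulation:
-
-    vga="stdvga"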
-
-=item B<vnc=BOOLEAN>
-
-Allow access to the display via the VNC protocol. This enables the
-other VNC-related settings. The default is to enable this.
-
-=item B<vnclisten="ADDRESS[:DISPLAYNUM]">
-
-Specifies the IP address, and optionally VNC display number, to use.
-
-=item B<vncdisplay=DISPLAYNUM>
-
-Specifies the VNC display number to use. The actual TCP port number
-will be DISPLAYNUM+5900.
-
-=item B<vncunused=BOOLEAN>
-
-Requests that the VNC display setup search for a free TCP port to use.
-The actual display used can be accessed with C<xl vncviewer>.
-
-=item B<vncpasswd="PASSWORD">
-
-Specifies the password for the VNC server.
-
-=item B<keymap="LANG">
-
-Configure the keymap to use for the keyboard associated with this
-display. If the input method does not easily support raw keycodes
-(e.g. this is often the case when using VNC) then this allows us to
-correctly map the input keys into keycodes seen by the guest. The
-specific values which are accepted are defined by the version of the
-device-model which you are using. See L</"Keymaps"> below or consult the
-L<qemu(1)> manpage. The default is B<en-us>.
-
-=item B<sdl=BOOLEAN>
-
-Specifies that the display should be presented via an X window (using
-Simple DirectMedia Layer). The default is not to enable this mode.
-
-=item B<opengl=BOOLEAN>
-
-Enable OpenGL acceleration of the SDL display. Only affects machines
-using B<device_model_version="qemu-xen-traditional"> and only if the
-device-model was compiled with OpenGL support. False (0) by default.
-
-=item B<nographic=BOOLEAN>
-
-Enable or disable the virtual graphics device. The default is to
-provide a VGA graphics device but this option can be used to disable
-it.
-
-=back
-
-=head3 Spice Graphics Support
-
-The following options control the features of SPICE.
-
-=over 4
-
-=item B<spice=BOOLEAN>
-
-Allow access to the display via the SPICE protocol. This enables the
-other SPICE-related settings.
-
-=item B<spicehost="ADDRESS">
-
-Specify the interface address to listen on if given, otherwise any
-interface.
-
-=item B<spiceport=NUMBER>
-
-Specify the port for the SPICE server to listen on, if SPICE is
-enabled.
-
-=item B<spicetls_port=NUMBER>
-
-Specify the secure port for the SPICE server to listen on, if SPICE
-is enabled. At least one of spiceport or spicetls_port must be
-given if SPICE is enabled. NB: options depending on spicetls_port
-are not supported yet.
-
-=item B<spicedisable_ticketing=BOOLEAN>
-
-Enable client connection without password. When disabled, spicepasswd
-must be set. The default is false (0).
-
-=item B<spicepasswd="PASSWORD">
-
-Specify the ticket password which is used by a client for connection.
-
-=item B<spiceagent_mouse=BOOLEAN>
-
-Whether the SPICE agent is used for client mouse mode. The default is
-true (1).
-
-=item B<spicevdagent=BOOLEAN>
-
-Enables the Spice vdagent. The Spice vdagent is an optional component
-for enhancing user experience and performing guest-oriented management
-tasks. Its features include: client mouse mode (no need to grab the
-mouse with the client, no mouse lag), automatic adjustment of screen
-resolution, and copy and paste (text and image) between client and
-domU. It also requires the vdagent service to be installed in the
-domU OS in order to work. The default is false (0).
-
-=item B<spice_clipboard_sharing=BOOLEAN>
-
-Enables Spice clipboard sharing (copy/paste). It requires
-B<spicevdagent> to be enabled. The default is false (0).
-
-=item B<spiceusbredirection=NUMBER>
-
-Enables Spice USB redirection. Creates NUMBER USB redirection channels
-for redirection of up to 4 USB devices from the Spice client to the
-domU's qemu. It requires a USB controller; if none is defined, a USB2
-controller will be added automatically. The default is disabled (0).
-
-=item B<spice_image_compression=[auto_glz|auto_lz|quic|glz|lz|off]>
-
-Specifies what image compression is to be used by Spice (if given),
-otherwise the qemu default will be used. Please see the documentation
-of your current qemu version for details.
-
-=item B<spice_streaming_video=[filter|all|off]>
-
-Specifies what streaming video setting is to be used by Spice (if given),
-otherwise the qemu default will be used.
-
-=back
-
-=head3 Miscellaneous Emulated Hardware
-
-=over 4
-
-=item B<serial=[ "DEVICE", "DEVICE", ...]>
-
-Redirect virtual serial ports to B<DEVICE>s. Please see the
-B<-serial> option in the L<qemu(1)> manpage for details of the valid
-B<DEVICE> options. Default is B<vc> when in graphical mode and
-B<stdio> if B<nographics=1> is used.
-
-The form serial=DEVICE is also accepted for backwards compatibility.
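-
-For example, a minimal sketch redirecting the first virtual serial
-port to a freshly allocated host pty:
-
-    serial=[ "pty" ]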
-
-=item B<soundhw=DEVICE>
-
-Select the virtual sound card to expose to the guest. The valid
-devices are defined by the device model configuration, please see the
-L<qemu(1)> manpage for details. The default is not to export any sound
-device.
-
-=item B<usb=BOOLEAN>
-
-Enables or disables an emulated USB bus in the guest.
-
-=item B<usbversion=NUMBER>
-
-Specifies the type of the emulated USB bus in the guest: 1 for usb1,
-2 for usb2 and 3 for usb3. It is available only with upstream qemu.
-Due to implementation limitations this is not compatible with the usb
-and usbdevice parameters.
-Default is 0 (no usb controller defined).
-
-=item B<usbdevice=[ "DEVICE", "DEVICE", ...]>
-
-Adds B<DEVICE>s to the emulated USB bus. The USB bus must also be
-enabled using B<usb=1>. The most common use for this option is
-B<usbdevice=['tablet']> which adds a pointer device using absolute
-coordinates. Such devices function better than relative coordinate
-devices (such as a standard mouse) since many methods of exporting
-guest graphics (such as VNC) work better in this mode. Note that this
-is independent of the actual pointer device you are using on the
-host/client side.
-
-Host devices can also be passed through in this way, by specifying
-host:USBID, where USBID is of the form xxxx:yyyy. The USBID can
-typically be found by using lsusb or usb-devices.
-
-If you wish to use the "host:bus.addr" format, remove any leading '0' from the
-bus and addr. For example, for the USB device on bus 008 dev 002, you should
-write "host:8.2".
-
-The form usbdevice=DEVICE is also accepted for backwards compatibility.
-
-More valid options can be found in the "usbdevice" section of the qemu
-documentation.
-
-=item B<vendor_device="VENDOR_DEVICE">
-
-Selects which variant of the QEMU xen-pvdevice should be used for this
-guest. Valid values are:
-
-=over 4
-
-=item B<none>
-
-The xen-pvdevice should be omitted. This is the default.
-
-=item B<xenserver>
-
-The xenserver variant of the xen-pvdevice (device-id=C000) will be
-specified, enabling the use of XenServer PV drivers in the guest.
-
-=back
-
-This parameter only takes effect when device_model_version=qemu-xen.
-See F<docs/misc/pci-device-reservations.txt> for more information.
-
-=back
-
-=head2 Device-Model Options
-
-The following options control the selection of the device-model. This
-is the component which provides emulation of the virtual devices to an
-HVM guest. For a PV guest a device-model is sometimes used to provide
-backends for certain PV devices (most usually a virtual framebuffer
-device).
-
-=over 4
-
-=item B<device_model_version="DEVICE-MODEL">
-
-Selects which variant of the device-model should be used for this
-guest. Valid values are:
-
-=over 4
-
-=item B<qemu-xen>
-
-Use the device-model merged into the upstream QEMU project.
-This device-model is the default for Linux dom0.
-
-=item B<qemu-xen-traditional>
-
-Use the device-model based upon the historical Xen fork of Qemu.
-This device-model is still the default for NetBSD dom0.
-
-=item B<none>
-
-Don't use any device model. This requires a kernel capable of booting
-without emulated devices.
-
-=back
-
-It is recommended to accept the default value for new guests. If
-you have existing guests then, depending on the nature of the guest
-Operating System, you may wish to force them to use the device
-model which they were installed with.
-
-=item B<device_model_override="PATH">
-
-Override the path to the binary to be used as the device-model. The
-binary provided here MUST be consistent with the
-`device_model_version` which you have specified. You should not
-normally need to specify this option.
-
-=item B<device_model_stubdomain_override=BOOLEAN>
-
-Override the use of stubdomain based device-model. Normally this will
-be automatically selected based upon the other features and options
-you have selected.
-
-=item B<device_model_stubdomain_seclabel="LABEL">
-
-Assign an XSM security label to the device-model stubdomain.
-
-=item B<device_model_args=[ "ARG", "ARG", ...]>
-
-Pass additional arbitrary options on the device-model command
-line. Each element in the list is passed as an option to the
-device-model.
-
-=item B<device_model_args_pv=[ "ARG", "ARG", ...]>
-
-Pass additional arbitrary options on the device-model command line for
-a PV device model only. Each element in the list is passed as an
-option to the device-model.
-
-=item B<device_model_args_hvm=[ "ARG", "ARG", ...]>
-
-Pass additional arbitrary options on the device-model command line for
-an HVM device model only. Each element in the list is passed as an
-option to the device-model.
-
-=back
-
-=head2 Keymaps
-
-The keymaps available are defined by the device-model which you are
-using. Commonly this includes:
-
- ar de-ch es fo fr-ca hu ja mk no pt-br sv
- da en-gb et fr fr-ch is lt nl pl ru th
- de en-us fi fr-be hr it lv nl-be pt sl tr
-
-The default is B<en-us>.
-
-See L<qemu(1)> for more information.
-
-=head2 Architecture Specific options
-
-=head3 ARM
-
-=over 4
-
-=item B<gic_version="vN">
-
-Version of the GIC emulated for the guest. Currently, the following
-versions are supported:
-
-=over 4
-
-=item B<v2>
-
-Emulate a GICv2
-
-=item B<v3>
-
-Emulate a GICv3. Note that the emulated GIC does not support the
-GICv2 compatibility mode.
-
-=item B<default>
-
-Emulate the same version as the native GIC hardware used by the host
-where the domain was created.
-
-=back
-
-This requires hardware compatibility with the requested version, either
-natively or via hardware backwards compatibility support.
-
-=back
-
-=head1 SEE ALSO
-
-=over 4
-
-=item L<xl(1)>
-
-=item L<xlcpupool.cfg(5)>
-
-=item F<xl-disk-configuration>
-
-=item F<xl-network-configuration>
-
-=item F<docs/misc/tscmode.txt>
-
-=back
-
-=head1 FILES
-
-F</etc/xen/NAME.cfg>
-F</var/lib/xen/dump/NAME>
-
-=head1 BUGS
-
-This document may contain items which require further
-documentation. Patches to improve incomplete items (or any other item)
-are gratefully received on the xen-devel@lists.xen.org mailing
-list. Please see L<http://wiki.xen.org/wiki/SubmittingXenPatches> for
-information on how to submit a patch to Xen.
-
--- /dev/null
+=head1 NAME
+
+xl.cfg - XL Domain Configuration File Syntax
+
+=head1 SYNOPSIS
+
+ /etc/xen/xldomain
+
+=head1 DESCRIPTION
+
+To create a VM (a domain in Xen terminology, sometimes called a guest)
+with xl requires the provision of a domain config file. Typically
+these live in `/etc/xen/DOMAIN.cfg` where DOMAIN is the name of the
+domain.
+
+=head1 SYNTAX
+
+A domain config file consists of a series of C<KEY=VALUE> pairs.
+
+Some C<KEY>s are mandatory, others are general options which apply to
+any guest type while others relate only to specific guest types
+(e.g. PV or HVM guests).
+
+A value C<VALUE> is one of:
+
+=over 4
+
+=item B<"STRING">
+
+A string, surrounded by either single or double quotes.
+
+=item B<NUMBER>
+
+A number, in either decimal, octal (using a C<0> prefix) or
+hexadecimal (using a C<0x> prefix).
+
+=item B<BOOLEAN>
+
+A C<NUMBER> interpreted as C<False> (C<0>) or C<True> (any other
+value).
+
+=item B<[ VALUE, VALUE, ... ]>
+
+A list of C<VALUE>s of the above types. Lists can be heterogeneous and
+nested.
+
+=back
+
+The semantics of each C<KEY> defines which form of C<VALUE> is required.
+
+Pairs may be separated either by a newline or a semicolon. Both
+of the following are valid:
+
+ name="h0"
+ builder="hvm"
+
+ name="h0"; builder="hvm"
+
+=head1 OPTIONS
+
+=head2 Mandatory Configuration Items
+
+The following key is mandatory for any guest type:
+
+=over 4
+
+=item B<name="NAME">
+
+Specifies the name of the domain. Names of domains existing on a
+single host must be unique.
+
+=back
+
+=head2 Selecting Guest Type
+
+=over 4
+
+=item B<builder="generic">
+
+Specifies that this is to be a PV domain. This is the default.
+
+=item B<builder="hvm">
+
+Specifies that this is to be an HVM domain. That is, a fully
+virtualised computer with emulated BIOS, disk and network peripherals,
+etc. The default is a PV domain, suitable for hosting Xen-aware guest
+operating systems.
+
+=back
+
+=head2 General Options
+
+The following options apply to guests of any type.
+
+=head3 CPU Allocation
+
+=over 4
+
+=item B<pool="CPUPOOLNAME">
+
+Put the guest's vcpus into the named cpu pool.
+
+=item B<vcpus=N>
+
+Start the guest with N vcpus initially online.
+
+=item B<maxvcpus=M>
+
+Allow the guest to bring up a maximum of M vcpus. At start of day if
+`vcpus=N` is less than `maxvcpus=M` then the first `N` vcpus will be
+created online and the remainder will be offline.
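+
+For example, a sketch of a guest which boots with 2 vcpus online and
+can bring 2 more online later:
+
+    vcpus=2
+    maxvcpus=4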
+
+=item B<cpus="CPU-LIST">
+
+List of which cpus the guest is allowed to use. Default is no pinning at
+all (more on this below). A C<CPU-LIST> may be specified as follows:
+
+=over 4
+
+=item "all"
+
+To allow all the vcpus of the guest to run on all the cpus on the host.
+
+=item "0-3,5,^1"
+
+To allow all the vcpus of the guest to run on cpus 0,2,3,5. Combining
+this with "all" is possible, meaning "all,^7" results in all the vcpus
+of the guest running on all the cpus on the host except cpu 7.
+
+=item "nodes:0-3,node:^2"
+
+To allow all the vcpus of the guest to run on the cpus from NUMA nodes
+0,1,3 of the host. So, if cpus 0-3 belong to node 0, cpus 4-7 belong
+to node 1 and cpus 8-11 to node 3, the above would mean all the vcpus
+of the guest will run on cpus 0-3,8-11.
+
+Combining this notation with the one above is possible. For instance,
+"1,node:2,^6", means all the vcpus of the guest will run on cpu 1 and
+on all the cpus of NUMA node 2, but not on cpu 6. Following the same
+example as above, that would be cpus 1,4,5,7.
+
+Combining this with "all" is also possible, meaning "all,^nodes:1"
+results in all the vcpus of the guest running on all the cpus on the
+host, except for the cpus belonging to the host NUMA node 1.
+
+=item ["2", "3-8,^5"]
+
+To ask for specific vcpu mapping. That means (in this example), vcpu 0
+of the guest will run on cpu 2 of the host and vcpu 1 of the guest will
+run on cpus 3,4,6,7,8 of the host.
+
+More complex notation can be also used, exactly as described above. So
+"all,^5-8", or just "all", or "node:0,node:2,^9-11,18-20" are all legal,
+for each element of the list.
+
+=back
+
+If this option is not specified, no vcpu to cpu pinning is established,
+and the vcpus of the guest can run on all the cpus of the host. If this
+option is specified, the intersection of the vcpu pinning mask, provided
+here, and the soft affinity mask, provided via B<cpus_soft=> (if any),
+is utilized to compute the domain node-affinity, for driving memory
+allocations.
+
+=item B<cpus_soft="CPU-LIST">
+
+Exactly as B<cpus=>, but specifies soft affinity, rather than pinning
+(hard affinity). When using the credit scheduler, this means what cpus
+the vcpus of the domain prefer.
+
+A C<CPU-LIST> is specified exactly as above, for B<cpus=>.
+
+If this option is not specified, the vcpus of the guest will not have
+any preference regarding on what cpu to run. If this option is specified,
+the intersection of the soft affinity mask, provided here, and the vcpu
+pinning, provided via B<cpus=> (if any), is utilized to compute the
+domain node-affinity, for driving memory allocations.
+
+If this option is not specified (and B<cpus=> is not specified either),
+libxl automatically tries to place the guest on the least possible
+number of nodes. A heuristic approach is used for choosing the best
+node (or set of nodes), with the goal of maximizing performance for
+the guest and, at the same time, achieving efficient utilization of
+host cpus and memory. In that case, the soft affinity of all the vcpus
+of the domain will be set to the pcpus belonging to the NUMA nodes
+chosen during placement.
+
+For more details, see F<docs/misc/xl-numa-placement.markdown>.
+
+=back
+
+=head3 CPU Scheduling
+
+=over 4
+
+=item B<cpu_weight=WEIGHT>
+
+A domain with a weight of 512 will get twice as much CPU as a domain
+with a weight of 256 on a contended host.
+Legal weights range from 1 to 65535 and the default is 256.
+Honoured by the credit and credit2 schedulers.
+
+=item B<cap=N>
+
+The cap optionally fixes the maximum amount of CPU a domain will be
+able to consume, even if the host system has idle CPU cycles.
+The cap is expressed in percentage of one physical CPU:
+100 is 1 physical CPU, 50 is half a CPU, 400 is 4 CPUs, etc.
+The default, 0, means there is no upper cap.
+Honoured by the credit and credit2 schedulers.
+
+NB: Many systems have features that will scale down the computing
+power of a cpu that is not 100% utilized. This can be in the
+operating system, but can also sometimes be below the operating system
+in the BIOS. If you set a cap such that individual cores are running
+at less than 100%, this may have an impact on the performance of your
+workload over and above the impact of the cap. For example, if your
+processor runs at 2GHz, and you cap a vm at 50%, the power management
+system may also reduce the clock speed to 1GHz; the effect will be
+that your VM gets 25% of the available power (50% of 1GHz) rather than
+50% (50% of 2GHz). If you are not getting the performance you expect,
+look at performance and cpufreq options in your operating system and
+your BIOS.
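+
+For example, a sketch giving a domain twice the default share of CPU
+while capping it at one and a half physical CPUs:
+
+    cpu_weight=512
+    cap=150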
+
+=back
+
+=head3 Memory Allocation
+
+=over 4
+
+=item B<memory=MBYTES>
+
+Start the guest with MBYTES megabytes of RAM.
+
+=item B<maxmem=MBYTES>
+
+Specifies the maximum amount of memory a guest can ever see.
+The value of B<maxmem=> must be equal to or greater than B<memory=>.
+
+If the values of B<memory=> and B<maxmem=> differ, the guest will be
+started "pre-ballooned". A "pre-ballooned" HVM guest needs a balloon
+driver; without a balloon driver it will crash.
+
+NOTE: Because of the way ballooning works, the guest has to allocate
+memory to keep track of maxmem pages, regardless of how much memory it
+actually has available to it. A guest with maxmem=262144 and
+memory=8096 will report significantly less memory available for use
+than a system with maxmem=8096 memory=8096 due to the memory overhead
+of having to track the unused pages.
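+
+For example, a sketch of a guest started "pre-ballooned" at 1024MB
+which may later be ballooned up to 2048MB:
+
+    memory=1024
+    maxmem=2048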
+
+=back
+
+=head3 Guest Virtual NUMA Configuration
+
+=over 4
+
+=item B<vnuma=[ VNODE_SPEC, VNODE_SPEC, ... ]>
+
+Specify virtual NUMA configuration with positional arguments. The
+nth B<VNODE_SPEC> in the list specifies the configuration of the nth
+virtual node.
+
+Note that virtual NUMA for PV guests is not yet supported, because
+there is an issue with cpuid handling that affects PV virtual NUMA.
+Furthermore, guests with virtual NUMA cannot be saved or migrated
+because the migration stream does not preserve node information.
+
+Each B<VNODE_SPEC> is a list, which has the form
+"[VNODE_CONFIG_OPTION,VNODE_CONFIG_OPTION, ... ]" (without quotes).
+
+For example:
+
+    vnuma = [ ["pnode=0","size=512","vcpus=0-4","vdistances=10,20"] ]
+
+means vnode 0 is mapped to pnode 0, has 512MB ram, has vcpus 0 to 4, the
+distance to itself is 10 and the distance to vnode 1 is 20.
+
+Each B<VNODE_CONFIG_OPTION> is a quoted key=value pair. Supported
+B<VNODE_CONFIG_OPTION>s are (they are all mandatory at the moment):
+
+=over 4
+
+=item B<pnode=NUMBER>
+
+Specify which physical node this virtual node maps to.
+
+=item B<size=MBYTES>
+
+Specify the size of this virtual node. The sum of memory size of all
+vnodes will become B<maxmem=>. If B<maxmem=> is specified separately,
+a check is performed to make sure the sum of all vnode memory matches
+B<maxmem=>.
+
+=item B<vcpus=CPU-STRING>
+
+Specify which vcpus belong to this node. B<CPU-STRING> is a
+comma-separated string of ranges and single cpus. An example is
+"vcpus=0-5,8", which specifies vcpu 0 to vcpu 5, and vcpu 8.
+
+=item B<vdistances=NUMBER, NUMBER, ... >
+
+Specify virtual distance from this node to all nodes (including
+itself) with positional arguments. For example, "vdistances=10,20"
+for vnode 0 means the distance from vnode 0 to vnode 0 is 10, from
+vnode 0 to vnode 1 is 20. The number of arguments supplied must match
+the total number of vnodes.
+
+Normally you can use the values from "xl info -n" or "numactl
+--hardware" to fill in vdistance list.
+
+=back
+
+=back
+
+=head3 Event Actions
+
+=over 4
+
+=item B<on_poweroff="ACTION">
+
+Specifies what should be done with the domain if it shuts itself down.
+The C<ACTION>s are:
+
+=over 4
+
+=item B<destroy>
+
+destroy the domain
+
+=item B<restart>
+
+destroy the domain and immediately create a new domain with the same
+configuration
+
+=item B<rename-restart>
+
+rename the domain which terminated, and then immediately create a new
+domain with the same configuration as the original
+
+=item B<preserve>
+
+keep the domain. It can be examined, and later destroyed with `xl
+destroy`.
+
+=item B<coredump-destroy>
+
+write a "coredump" of the domain to F<@XEN_DUMP_DIR@/NAME> and then
+destroy the domain.
+
+=item B<coredump-restart>
+
+write a "coredump" of the domain to F<@XEN_DUMP_DIR@/NAME> and then
+restart the domain.
+
+=item B<soft-reset>
+
+Reset all Xen specific interfaces for the Xen-aware HVM domain, allowing
+it to reestablish these interfaces and continue executing. PV and
+non-Xen-aware HVM guests are not supported.
+
+=back
+
+The default for C<on_poweroff> is C<destroy>.
+
+=item B<on_reboot="ACTION">
+
+Action to take if the domain shuts down with a reason code requesting
+a reboot. Default is C<restart>.
+
+=item B<on_watchdog="ACTION">
+
+Action to take if the domain shuts down due to a Xen watchdog timeout.
+Default is C<destroy>.
+
+=item B<on_crash="ACTION">
+
+Action to take if the domain crashes. Default is C<destroy>.
+
+=item B<on_soft_reset="ACTION">
+
+Action to take if the domain performs 'soft reset' (e.g. does kexec).
+Default is C<soft-reset>.
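+
+For example, a sketch which destroys the domain on clean shutdown,
+restarts it on reboot, and writes a core dump before restarting after
+a crash:
+
+    on_poweroff="destroy"
+    on_reboot="restart"
+    on_crash="coredump-restart"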
+
+=back
+
+=head3 Direct Kernel Boot
+
+Direct kernel boot allows booting directly from a kernel and initrd
+stored in the host physical machine OS, allowing command line arguments
+to be passed directly. PV guest direct kernel boot is supported. HVM
+guest direct kernel boot is supported with limitations (it is supported
+when using qemu-xen and the default BIOS 'seabios'; it is not supported
+in the case of stubdom-dm and the old rombios).
+
+=over 4
+
+=item B<kernel="PATHNAME">
+
+Load the specified file as the kernel image.
+
+=item B<ramdisk="PATHNAME">
+
+Load the specified file as the ramdisk.
+
+=item B<cmdline="STRING">
+
+Append B<cmdline="STRING"> to the kernel command line. (Note: it is
+guest specific what meaning this has). It can replace B<root="STRING">
+plus B<extra="STRING"> and is preferred. When B<cmdline="STRING"> is set,
+B<root="STRING"> and B<extra="STRING"> will be ignored.
+
+=item B<root="STRING">
+
+Append B<root="STRING"> to the kernel command line (Note: it is guest
+specific what meaning this has).
+
+=item B<extra="STRING">
+
+Append B<STRING> to the kernel command line. (Note: it is guest
+specific what meaning this has).
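+
+A minimal direct kernel boot sketch (the file paths and root device
+here are illustrative):
+
+    kernel="/boot/vmlinuz-guest"
+    ramdisk="/boot/initrd-guest.img"
+    cmdline="root=/dev/xvda1 ro"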
+
+=back
+
+=head3 Other Options
+
+=over 4
+
+=item B<uuid="UUID">
+
+Specifies the UUID of the domain. If not specified, a fresh unique
+UUID will be generated.
+
+=item B<seclabel="LABEL">
+
+Assign an XSM security label to this domain.
+
+=item B<init_seclabel="LABEL">
+
+Specify an XSM security label used for this domain temporarily during
+its build. The domain's XSM label will be changed to the execution
+seclabel (specified by "seclabel") once the build is complete, prior to
+unpausing the domain. With a properly constructed security policy (such
+as nomigrate_t in the example policy), this can be used to build a
+domain whose memory is not accessible to the toolstack domain.
+
+=item B<nomigrate=BOOLEAN>
+
+Disable migration of this domain. This enables certain other features
+which are incompatible with migration. Currently this is limited to
+enabling the invariant TSC feature flag in cpuid results when TSC is
+not emulated.
+
+=item B<driver_domain=BOOLEAN>
+
+Specify that this domain is a driver domain. This enables certain
+features needed in order to run a driver domain.
+
+=item B<device_tree=PATH>
+
+Specify a partial device tree (compiled via the Device Tree Compiler).
+Everything under the node "/passthrough" will be copied into the guest
+device tree. For convenience, the node "/aliases" is also copied to allow
+the user to define aliases which can be used by the guest kernel.
+
+Given the complexity of verifying the validity of a device tree, this
+option should only be used with a trusted device tree.
+
+Note that the partial device tree should avoid using phandle 65000,
+which is reserved by the toolstack.
+
+=back
+
+=head2 Devices
+
+The following options define the paravirtual, emulated and physical
+devices which the guest will contain.
+
+=over 4
+
+=item B<disk=[ "DISK_SPEC_STRING", "DISK_SPEC_STRING", ...]>
+
+Specifies the disks (both emulated disks and Xen virtual block
+devices) which are to be provided to the guest, and what objects on
+the host they should map to. See F<docs/misc/xl-disk-configuration.txt>.
+
+=item B<vif=[ "NET_SPEC_STRING", "NET_SPEC_STRING", ...]>
+
+Specifies the networking provision (both emulated network adapters,
+and Xen virtual interfaces) to be provided to the guest. See
+F<docs/misc/xl-network-configuration.markdown>.
+
+=item B<vtpm=[ "VTPM_SPEC_STRING", "VTPM_SPEC_STRING", ...]>
+
+Specifies the virtual trusted platform module to be
+provided to the guest. Please see F<docs/misc/vtpm.txt>
+for more details.
+
+Each B<VTPM_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
+settings, from the following list:
+
+=over 4
+
+=item C<backend=DOMAIN>
+
+Specify the backend domain name or id. This value is required!
+If this domain is a guest, the backend should be set to the
+vtpm domain name. If this domain is a vtpm, the
+backend should be set to the vtpm manager domain name.
+
+=item C<uuid=UUID>
+
+Specify the uuid of this vtpm device. The uuid is used to uniquely
+identify the vtpm device. You can create one using the uuidgen
+program on unix systems. If left unspecified, a new uuid
+will be randomly generated every time the domain boots.
+If this is a vtpm domain, you should specify a value. The
+value is optional if this is a guest domain.
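+
+For example, a sketch attaching a vtpm whose backend runs in a
+hypothetical domain named "vtpmdom":
+
+    vtpm=[ "backend=vtpmdom" ]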
+
+=back
+
+=item B<vfb=[ "VFB_SPEC_STRING", "VFB_SPEC_STRING", ...]>
+
+Specifies the paravirtual framebuffer devices which should be supplied
+to the domain.
+
+This option does not control the emulated graphics card presented to
+an HVM guest. See L<Emulated VGA Graphics Device> below for how to
+configure the emulated device. If L<Emulated VGA Graphics Device> options
+are used in a PV guest configuration, xl will pick up B<vnc>, B<vnclisten>,
+B<vncpasswd>, B<vncdisplay>, B<vncunused>, B<sdl>, B<opengl> and
+B<keymap> to construct a paravirtual framebuffer device for the guest.
+
+Each B<VFB_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
+settings, from the following list:
+
+=over 4
+
+=item C<vnc=BOOLEAN>
+
+Allow access to the display via the VNC protocol. This enables the
+other VNC-related settings. The default is to enable this.
+
+=item C<vnclisten="ADDRESS[:DISPLAYNUM]">
+
+Specifies the IP address, and optionally VNC display number, to use.
+
+NB that if you specify the display number here, you should not use
+vncdisplay.
+
+=item C<vncdisplay=DISPLAYNUM>
+
+Specifies the VNC display number to use. The actual TCP port number
+will be DISPLAYNUM+5900.
+
+NB that you should not use this option if you set the displaynum in the
+vnclisten string.
+
+=item C<vncunused=BOOLEAN>
+
+Requests that the VNC display setup search for a free TCP port to use.
+The actual display used can be accessed with C<xl vncviewer>.
+
+=item C<vncpasswd="PASSWORD">
+
+Specifies the password for the VNC server.
+
+=item C<sdl=BOOLEAN>
+
+Specifies that the display should be presented via an X window (using
+Simple DirectMedia Layer). The default is to not enable this mode.
+
+=item C<display=DISPLAY>
+
+Specifies the X Window display that should be used when the sdl option
+is used.
+
+=item C<xauthority=XAUTHORITY>
+
+Specifies the path to the X authority file that should be used to
+connect to the X server when the sdl option is used.
+
+=item C<opengl=BOOLEAN>
+
+Enable OpenGL acceleration of the SDL display. Only affects machines
+using C<device_model_version="qemu-xen-traditional"> and only if the
+device-model was compiled with OpenGL support. Disabled by default.
+
+=item C<keymap="LANG">
+
+Configure the keymap to use for the keyboard associated with this
+display. If the input method does not easily support raw keycodes
+(e.g. this is often the case when using VNC) then this allows us to
+correctly map the input keys into keycodes seen by the guest. The
+specific values which are accepted are defined by the version of the
+device-model which you are using. See L</"Keymaps"> below or consult the
+L<qemu(1)> manpage. The default is B<en-us>.
+
+=back
+
+=item B<channel=[ "CHANNEL_SPEC_STRING", "CHANNEL_SPEC_STRING", ...]>
+
+Specifies the virtual channels to be provided to the guest. A
+channel is a low-bandwidth, bidirectional byte stream, which resembles
+a serial link. Typical uses for channels include transmitting VM
+configuration after boot and signalling to in-guest agents. Please see
+F<docs/misc/channels.txt> for more details.
+
+Each B<CHANNEL_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
+settings. Leading and trailing whitespace is ignored in both KEY and
+VALUE. Neither KEY nor VALUE may contain ',', '=' or '"'. Defined values
+are:
+
+=over 4
+
+=item C<backend=DOMAIN>
+
+Specify the backend domain name or id. This parameter is optional. If
+this parameter is omitted then the toolstack domain will be assumed.
+
+=item C<name=NAME>
+
+Specify the string name for this device. This parameter is mandatory.
+This should be a well-known name for the specific application (e.g.
+guest agent) and should be used by the frontend to connect the
+application to the right channel device. There is no formal registry
+of channel names, so application authors are encouraged to make their
+names unique by including domain name and version number in the string
+(e.g. org.mydomain.guestagent.1).
+
+=item C<connection=CONNECTION>
+
+Specify how the backend will be implemented. The following options are
+available:
+
+=over 4
+
+=item B<connection=SOCKET>
+
+The backend will bind a Unix domain socket (at the path given by
+B<path=PATH>), call listen and accept connections. The backend will proxy
+data between the channel and the connected socket.
+
+=item B<connection=PTY>
+
+The backend will create a pty and proxy data between the channel and the
+master device. The command B<xl channel-list> can be used to discover the
+assigned slave device.
+
+=back
+
+=back
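+
+Putting the above together, a sketch of a socket-backed channel for a
+hypothetical guest agent (the name and path are illustrative):
+
+    channel=[ "connection=socket,path=/var/run/xen/guest-agent,name=org.example.guestagent.1" ]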
+
+=item B<rdm="RDM_RESERVATION_STRING">
+
+(HVM/x86 only) Specifies information about Reserved Device Memory (RDM),
+which is necessary to enable robust device passthrough. One example of RDM
+is reported through the ACPI Reserved Memory Region Reporting (RMRR)
+structure on the x86 platform.
+
+B<RDM_RESERVATION_STRING> has the form C<KEY=VALUE,KEY=VALUE,...> where:
+
+=over 4
+
+=item B<KEY=VALUE>
+
+Possible B<KEY>s are:
+
+=over 4
+
+=item B<strategy="STRING">
+
+Currently there is only one valid type:
+
+"host" means all reserved device memory on this platform should be checked to
+reserve regions in this VM's guest address space. This global rdm parameter
+allows the user to specify reserved regions explicitly, and using "host" includes
+all reserved regions reported on this platform, which is useful when doing
+hotplug.
+
+By default this isn't set, so we don't check all rdms. Instead, we just
+check the rdm specific to a given device if you're assigning this kind of
+device. Note this option is not recommended unless you can make sure a
+conflict does exist.
+
+For example, suppose you're trying to set "memory = 2800" to allocate
+memory to one given VM but the platform owns two RDM regions like:
+
+    Device A [sbdf_A]: RMRR region_A: base_addr ac6d3000 end_address ac6e6fff
+    Device B [sbdf_B]: RMRR region_B: base_addr ad800000 end_address afffffff
+
+In this conflict case,
+
+#1. If B<strategy> is set to "host", for example,
+
+rdm = "strategy=host,policy=strict" or rdm = "strategy=host,policy=relaxed"
+
+It means all conflicts will be handled according to the policy
+introduced by B<policy> as described below.
+
+#2. If B<strategy> is not set at all, but
+
+    pci = [ 'sbdf_A, rdm_policy=xxxxx' ]
+
+It means only one conflict of region_A will be handled according to the policy
+introduced by B<rdm_policy="STRING"> as described inside pci options.
+
+=item B<policy="STRING">
+
+Specifies how to deal with conflicts when reserving reserved device
+memory in guest address space.
+
+When such a conflict is unresolved,
+
+"strict" means VM can't be created, or the associated device can't be
+attached in the case of hotplug.
+
+"relaxed" allows VM to be created but may cause VM to crash if
+pass-through device accesses RDM. For exampl,e Windows IGD GFX driver
+always accessed RDM regions so it leads to VM crash.
+
+Note this may be overridden by the rdm_policy option in the PCI device configuration.
+
+=back
+
+=back
+
+=item B<usbctrl=[ "USBCTRL_SPEC_STRING", "USBCTRL_SPEC_STRING", ... ]>
+
+Specifies the USB controllers created for this guest. Each
+B<USBCTRL_SPEC_STRING> has the form C<KEY=VALUE,KEY=VALUE,...> where:
+
+=over 4
+
+=item B<KEY=VALUE>
+
+Possible B<KEY>s are:
+
+=over 4
+
+=item B<type=TYPE>
+
+Specifies the usb controller type.
+
+"pv" denotes a kernel based pvusb backend.
+
+"qusb" specifies a qemu base backend for pvusb.
+
+"auto" (the default) determines whether a kernel based backend is installed.
+If this is the case, "pv" is selected, "qusb" will be selected if no kernel
+backend is currently available.
+
+=item B<version=VERSION>
+
+Specifies the usb controller version. Possible values include
+1 (USB1.1) and 2 (USB2.0). Default is 2 (USB2.0).
+
+=item B<ports=PORTS>
+
+Specifies the total number of ports of the usb controller. The maximum
+number is 31. Default is 8.
+
+USB controller ids start from 0. In line with the USB spec, however,
+ports on a controller start from 1.
+
+For example:
+
+    usbctrl=["version=1,ports=4", "version=2,ports=8"]
+
+The first controller has controller id 0 and ports 1-4; the second has
+controller id 1 and ports 1-8.
+
+=back
+
+=back
+
+=item B<usbdev=[ "USB_SPEC_STRING", "USB_SPEC_STRING", ... ]>
+
+Specifies the USB devices to be attached to the guest at boot. Each
+B<USB_SPEC_STRING> has the form C<KEY=VALUE,KEY=VALUE,...> where:
+
+=over 4
+
+=item B<KEY=VALUE>
+
+Possible B<KEY>s are:
+
+=over 4
+
+=item B<devtype=hostdev>
+
+Specifies the USB device type. Currently only 'hostdev' is supported.
+
+=item B<hostbus=busnum>
+
+Specifies busnum of the USB device from the host perspective.
+
+=item B<hostaddr=devnum>
+
+Specifies devnum of the USB device from the host perspective.
+
+=item B<controller=CONTROLLER>
+
+Specifies the USB controller id to which the USB device is attached.
+
+=item B<port=PORT>
+
+Specifies the USB port to which the USB device is attached. B<port=PORT>
+is valid only when B<controller=CONTROLLER> is specified.
+
+=back
+
+If no controller is specified, an available controller:port combination
+will be used. If there are no available controller:port options,
+a new controller will be created.
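+
+For example, a sketch passing through the host USB device on bus 1,
+device 3 (illustrative values; see lsusb for your own devices):
+
+    usbdev=[ "hostbus=1,hostaddr=3" ]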
+
+=back
+
+=item B<pci=[ "PCI_SPEC_STRING", "PCI_SPEC_STRING", ... ]>
+
+Specifies the host PCI devices to pass through to this guest. Each B<PCI_SPEC_STRING>
+has the form C<[DDDD:]BB:DD.F[@VSLOT],KEY=VALUE,KEY=VALUE,...> where:
+
+=over 4
+
+=item B<DDDD:BB:DD.F>
+
+Identifies the PCI device from the host perspective in domain
+(B<DDDD>), Bus (B<BB>), Device (B<DD>) and Function (B<F>) syntax. This is
+the same scheme as used in the output of C<lspci> for the device in
+question. Note: By default C<lspci> will omit the domain (B<DDDD>) if it
+is zero and it is optional here also. You may specify the function
+(B<F>) as B<*> to indicate all functions.
+
+=item B<@VSLOT>
+
+Specifies the virtual device where the guest will see this
+device. This is equivalent to the B<DD> which the guest sees. In a
+guest B<DDDD> and B<BB> are C<0000:00>.
+
+=item B<KEY=VALUE>
+
+Possible B<KEY>s are:
+
+=over 4
+
+=item B<permissive=BOOLEAN>
+
+By default pciback only allows PV guests to write "known safe" values
+into PCI config space, likewise QEMU (both qemu-xen and
+qemu-traditional) imposes the same constraint on HVM guests. However,
+many devices require writes to other areas of config space in order to
+operate properly. This option tells the backend (pciback or QEMU) to
+allow all writes to PCI config space of this device by this domain.
+
+This option should be enabled with caution: it gives the guest much
+more control over the device, which may have security or stability
+implications. It is recommended to enable this option only for
+trusted VMs under administrator control.
+
+=item B<msitranslate=BOOLEAN>
+
+Specifies that MSI-INTx translation should be turned on for the PCI
+device. When enabled, MSI-INTx translation will always enable MSI on
+the PCI device regardless of whether the guest uses INTx or MSI. Some
+device drivers, such as NVIDIA's, detect an inconsistency and do not
+function when this option is enabled. Therefore the default is false (0).
+
+=item B<seize=BOOLEAN>
+
+Tells xl to automatically attempt to re-assign a device to
+pciback if it is not already assigned.
+
+WARNING: If you set this option, xl will gladly re-assign a critical
+system device, such as a network or a disk controller being used by
+dom0, without confirmation. Please use with care.
+
+=item B<power_mgmt=BOOLEAN>
+
+(HVM only) Specifies that the VM should be able to program the
+D0-D3hot power management states for the PCI device. False (0) by
+default.
+
+=item B<rdm_policy="STRING">
+
+(HVM/x86 only) This is the same as the policy option inside the rdm
+option, but specific to a given device. The default is "relaxed", the
+same as for the policy option.
+
+Note this overrides the global B<rdm> option.
+
+=back
+
+=back
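+
+Putting this together, a sketch passing through two hypothetical host
+devices, the second in permissive mode:
+
+    pci=[ "01:00.0", "02:00.0,permissive=1" ]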
+
+=item B<pci_permissive=BOOLEAN>
+
+Changes the default value of 'permissive' for all PCI devices passed
+through to this VM. See L<permissive|/"permissive_boolean"> above.
+
+=item B<pci_msitranslate=BOOLEAN>
+
+Changes the default value of 'msitranslate' for all PCI devices passed
+through to this VM. See L<msitranslate|/"msitranslate_boolean"> above.
+
+=item B<pci_seize=BOOLEAN>
+
+Changes the default value of 'seize' for all PCI devices passed
+through to this VM. See L<seize|/"seize_boolean"> above.
+
+=item B<pci_power_mgmt=BOOLEAN>
+
+(HVM only) Changes the default value of 'power_mgmt' for all PCI
+devices passed through to this VM. See L<power_mgmt|/"power_mgmt_boolean">
+above.
+
+=item B<gfx_passthru=BOOLEAN|"STRING">
+
+Enable graphics device PCI passthrough. This option makes an assigned
+PCI graphics card become the primary graphics card in the VM. The QEMU
+emulated graphics adapter is disabled and the VNC console for the VM
+will not have any graphics output. All graphics output, including boot
+time QEMU BIOS messages from the VM, will go to the physical outputs
+of the passed-through physical graphics card.
+
+The graphics card PCI device to pass through is chosen with the B<pci>
+option, exactly in the same way as normal Xen PCI device
+passthrough/assignment is done. Note that gfx_passthru does not do
+any kind of sharing of the GPU, so you can only assign the GPU to one
+single VM at a time.
+
+gfx_passthru also enables various legacy VGA memory ranges, BARs, MMIOs,
+and ioports to be passed through to the VM, since those are required
+for correct operation of things like VGA BIOS, text mode, VBE, etc.
+
+Enabling the gfx_passthru option also copies the physical graphics card
+video BIOS to the guest memory, and executes the VBIOS in the guest
+to initialize the graphics card.
+
+Most graphics adapters require vendor specific tweaks for properly
+working graphics passthrough. See the XenVGAPassthroughTestedAdapters
+L<http://wiki.xen.org/wiki/XenVGAPassthroughTestedAdapters> wiki page
+for currently supported graphics cards for gfx_passthru.
+
+gfx_passthru is currently supported both with the qemu-xen-traditional
+device-model and upstream qemu-xen device-model.
+
+When given as a boolean the B<gfx_passthru> option either disables gfx
+passthru or enables autodetection.
+
+But when given as a string the B<gfx_passthru> option describes the type
+of device to enable. Note this behavior is only supported with the upstream
+qemu-xen device-model. With qemu-xen-traditional IGD is always assumed
+and options other than autodetect or explicit IGD will result in an error.
+
+Currently, valid options are:
+
+=over 4
+
+=item B<gfx_passthru=0>
+
+Disables graphics device PCI passthrough.
+
+=item B<gfx_passthru=1>, B<gfx_passthru="default">
+
+Enables graphics device PCI passthrough and autodetects the type of device
+which is being used.
+
+=item "igd"
+
+Enables graphics device PCI passthrough, forcing the type of device to
+Intel Graphics Device.
+
+=back
+
+Note that some graphics adapters (AMD/ATI cards, for example) do not
+necessarily require the gfx_passthru option, so you can use the normal Xen
+PCI passthrough to assign the graphics card as a secondary graphics
+card to the VM. The QEMU-emulated graphics card remains the primary
+graphics card, and VNC output is available from the QEMU-emulated
+primary adapter.
+
+More information about the Xen gfx_passthru feature is available
+on the XenVGAPassthrough L<http://wiki.xen.org/wiki/XenVGAPassthrough>
+wiki page.
+
+=item B<rdm_mem_boundary=MBYTES>
+
+Number of megabytes to set as a boundary for checking RDM conflicts.
+
+When RDM conflicts with RAM, the RDM regions are probably scattered over
+the whole RAM space. Multiple RDM entries would worsen this and lead to
+a complicated memory layout. Here we're trying to use a simple solution
+to avoid breaking the existing layout. When a conflict occurs:
+
+ #1. Above a predefined boundary
+     - move lowmem_end below the reserved region to solve the conflict;
+
+ #2. Below a predefined boundary
+     - check the strict/relaxed policy.
+     "strict" policy leads libxl to fail; note that when both policies
+     are specified on a given region, 'strict' is always preferred.
+     "relaxed" policy issues a warning message and also masks this
+     entry as INVALID to indicate we shouldn't expose this entry to
+     hvmloader.
+
+The default is 2048 (i.e. 2GB).
+
+=item B<dtdev=[ "DTDEV_PATH", "DTDEV_PATH", ... ]>
+
+Specifies the host device tree nodes to pass through to this guest. Each
+DTDEV_PATH is the absolute path in the device tree.
+
+=item B<ioports=[ "IOPORT_RANGE", "IOPORT_RANGE", ... ]>
+
+Allow the guest to access specific legacy I/O ports. Each B<IOPORT_RANGE>
+is given in hexadecimal and may be either a span, e.g. C<2f8-2ff>
+(inclusive), or a single I/O port, e.g. C<2f8>.
+
+It is recommended to use this option only for trusted VMs under
+administrator control.
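+
+For example, a sketch granting access to the legacy first serial port
+range:
+
+    ioports=[ "3f8-3ff" ]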
+
+=item B<iomem=[ "IOMEM_START,NUM_PAGES[@GFN]", "IOMEM_START,NUM_PAGES[@GFN]", ... ]>
+
+Allow auto-translated domains to access specific hardware I/O memory pages.
+
+B<IOMEM_START> is a physical page number. B<NUM_PAGES> is the number of pages,
+beginning with B<IOMEM_START>, to allow access to. B<GFN> specifies the guest
+frame number where the mapping will start in the domU's address space. If
+B<GFN> is not given, the mapping will be performed using B<IOMEM_START> as a
+start in the domU's address space, therefore performing a 1:1 mapping by
+default.
+All of these values must be given in hexadecimal.
+
+Note that the IOMMU won't be updated with the mappings specified with this
+option. This option therefore should not be used to pass through any
+IOMMU-protected device.
+
+It is recommended to use this option only for trusted VMs under
+administrator control.
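+
+A sketch granting 1:1 access to four pages of hardware I/O memory (the
+page number here is illustrative):
+
+    iomem=[ "fed00,4" ]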
+
+=item B<irqs=[ NUMBER, NUMBER, ... ]>
+
+Allow a guest to access specific physical IRQs.
+
+It is recommended to use this option only for trusted VMs under
+administrator control.
+
+=item B<max_event_channels=N>
+
+Limit the guest to using at most N event channels (PV interrupts).
+Guests use hypervisor resources for each event channel they use.
+
+The default of 1023 should be sufficient for typical guests. The
+maximum value depends on what the guest supports. Guests supporting the
+FIFO-based event channel ABI support up to 131,071 event channels.
+Other guests are limited to 4095 (64-bit x86 and ARM) or 1023 (32-bit
+x86).
+
+=back
+
+=head2 Paravirtualised (PV) Guest Specific Options
+
+The following options apply only to Paravirtual guests.
+
+=over 4
+
+=item B<bootloader="PROGRAM">
+
+Run C<PROGRAM> to find the kernel image and ramdisk to use. Normally
+C<PROGRAM> would be C<pygrub>, which is an emulation of
+grub/grub2/syslinux. Either B<kernel> or B<bootloader> must be specified
+for PV guests.
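+
+For example, a typical PV guest booting via pygrub:
+
+    bootloader="pygrub"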
+
+=item B<bootloader_args=[ "ARG", "ARG", ...]>
+
+Append B<ARG>s to the arguments to the B<bootloader>
+program. Alternatively if the argument is a simple string then it will
+be split into words at whitespace (this second option is deprecated).
+
+=item B<e820_host=BOOLEAN>
+
+Selects whether to expose the host e820 (memory map) to the guest via
+the virtual e820. When this option is false (0) the guest pseudo-physical
+address space consists of a single contiguous RAM region. When this
+option is specified the virtual e820 instead reflects the host e820
+and contains the same PCI holes. The total amount of RAM represented
+by the memory map is always the same; this option configures only how
+it is laid out.
+
+Exposing the host e820 to the guest gives the guest kernel the
+opportunity to set aside the required part of its pseudo-physical
+address space in order to provide address space to map passed-through
+PCI devices. It is guest Operating System dependent whether this
+option is required, specifically it is required when using a mainline
+Linux ("pvops") kernel. This option defaults to true (1) if any PCI
+passthrough devices are configured and false (0) otherwise. If you do not
+configure any passthrough devices at domain creation time but expect
+to hotplug devices later then you should set this option. Conversely
+if your particular guest kernel does not require this behaviour then
+it is safe to allow this to be enabled but you may wish to disable it
+anyway.
+
+=item B<pvh=BOOLEAN>
+
+Selects whether to run this PV guest in an HVM container. Default is 0.
+
+=back
+
+=head2 Fully-virtualised (HVM) Guest Specific Options
+
+The following options apply only to HVM guests.
+
+=head3 Boot Device
+
+=over 4
+
+=item B<boot=[c|d|n]>
+
+Selects the emulated virtual device to boot from. Options are hard
+disk (B<c>), cd-rom (B<d>) or network/PXE (B<n>). Multiple options can be
+given and will be attempted in the order they are given. e.g. to boot
+from cd-rom but fallback to the hard disk you can give B<dc>. The
+default is B<cd>.
+
+=back
+
+=head3 Emulated disk controller type
+
+=over 4
+
+=item B<hdtype="STRING">
+
+Select the hd disk type (ide|ahci).
+If B<hdtype=ahci>, an ich9 disk controller in AHCI mode is added and used
+with upstream qemu to emulate disks instead of IDE. This decreases boot
+time but may not be supported by default in Windows XP and older Windows.
+The default is ide.
+
+=back
+
+=head3 Paging
+
+The following options control the mechanisms used to virtualise guest
+memory. The defaults are selected to give the best results for the
+common case and so you should normally leave these options
+unspecified.
+
+=over 4
+
+=item B<hap=BOOLEAN>
+
+Turns "hardware assisted paging" (the use of the hardware nested page
+table feature) on or off. This feature is called EPT (Extended Page
+Tables) by Intel and NPT (Nested Page Tables) or RVI (Rapid
+Virtualisation Indexing) by AMD. Affects HVM guests only. If turned
+off, Xen will run the guest in "shadow page table" mode where the
+guest's page table updates and/or TLB flushes etc. will be emulated.
+Use of HAP is the default when available.
+
+=item B<oos=BOOLEAN>
+
+Turns "out of sync pagetables" on or off. When running in shadow page
+table mode, the guest's page table updates may be deferred as
+specified in the Intel/AMD architecture manuals. However this may
+expose unexpected bugs in the guest, or find bugs in Xen, so it is
+possible to disable this feature. Use of out of sync page tables,
+when Xen thinks it appropriate, is the default.
+
+=item B<shadow_memory=MBYTES>
+
+Number of megabytes to set aside for shadowing guest pagetable pages
+(effectively acting as a cache of translated pages) or to use for HAP
+state. By default this is 1MB per guest vcpu plus 8KB per MB of guest
+RAM. You should not normally need to adjust this value. However if you
+are not using hardware assisted paging (i.e. you are using shadow
+mode) and your guest workload consists of a very large number of
+similar processes then increasing this value may improve performance.
+
+=back
+
+=head3 Processor and Platform Features
+
+The following options allow various processor and platform level
+features to be hidden or exposed from the guest's point of view. This
+can be useful when running older guest Operating Systems which may
+misbehave when faced with more modern features. In general you should
+accept the defaults for these options wherever possible.
+
+=over 4
+
+=item B<bios="STRING">
+
+Select the virtual firmware that is exposed to the guest.
+By default, a guess is made based on the device model, but sometimes
+it may be useful to request a different one, like UEFI.
+
+=over 4
+
+=item B<rombios>
+
+Loads ROMBIOS, a 16-bit x86 compatible BIOS. This is used by default
+when device_model_version=qemu-xen-traditional. This is the only BIOS
+option supported when device_model_version=qemu-xen-traditional. This is
+the BIOS used by all previous Xen versions.
+
+=item B<seabios>
+
+Loads SeaBIOS, a 16-bit x86 compatible BIOS. This is used by default
+with device_model_version=qemu-xen.
+
+=item B<ovmf>
+
+Loads OVMF, a standard UEFI firmware by the Tianocore project.
+Requires device_model_version=qemu-xen.
+
+=back
+
+=item B<pae=BOOLEAN>
+
+Hide or expose the IA32 Physical Address Extensions. These extensions
+make it possible for a 32 bit guest Operating System to access more
+than 4GB of RAM. Enabling PAE also enables other features such as
+NX. PAE is required if you wish to run a 64-bit guest Operating
+System. In general you should leave this enabled and allow the guest
+Operating System to choose whether or not to use PAE. (X86 only)
+
+=item B<acpi=BOOLEAN>
+
+Expose ACPI (Advanced Configuration and Power Interface) tables from
+the virtual firmware to the guest Operating System. ACPI is required
+by most modern guest Operating Systems. This option is enabled by
+default and usually you should omit it. However it may be necessary to
+disable ACPI for compatibility with some guest Operating Systems.
+
+=item B<acpi_s3=BOOLEAN>
+
+Include the S3 (suspend-to-ram) power state in the virtual firmware
+ACPI table. True (1) by default.
+
+=item B<acpi_s4=BOOLEAN>
+
+Include S4 (suspend-to-disk) power state in the virtual firmware ACPI
+table. True (1) by default.
+
+=item B<apic=BOOLEAN>
+
+Include information regarding APIC (Advanced Programmable Interrupt
+Controller) in the firmware/BIOS tables on a single processor
+guest. This causes the MP (multiprocessor) and PIR (PCI Interrupt
+Routing) tables to be exported by the virtual firmware. This option
+has no effect on a guest with multiple virtual CPUs as they must
+always include these tables. This option is enabled by default and you
+should usually omit it but it may be necessary to disable these
+firmware tables when using certain older guest Operating
+Systems. These tables have been superseded by newer constructs within
+the ACPI tables. (X86 only)
+
+=item B<nx=BOOLEAN>
+
+Hides or exposes the No-eXecute capability. This allows a guest
+Operating system to map pages such that they cannot be executed which
+can enhance security. This option requires that PAE also be
+enabled. (X86 only)
+
+=item B<hpet=BOOLEAN>
+
+Enables or disables HPET (High Precision Event Timer). This option is
+enabled by default and you should usually omit it. It may be necessary
+to disable the HPET in order to improve compatibility with guest
+Operating Systems. (X86 only)
+
+=item B<altp2mhvm=BOOLEAN>
+
+Enables or disables hvm guest access to alternate-p2m capability.
+Alternate-p2m allows a guest to manage multiple p2m guest physical
+"memory views" (as opposed to a single p2m). This option is
+disabled by default and is available only to hvm domains.
+You may want this option if you want to access-control/isolate
+access to specific guest physical memory pages accessed by
+the guest, e.g. for HVM domain memory introspection or
+for isolation/access-control of memory between components within
+a single guest hvm domain.
+
+=item B<nestedhvm=BOOLEAN>
+
+Enables or disables guest access to hardware virtualisation features,
+e.g. it allows a guest Operating System to also function as a
+hypervisor. This option is disabled by default. You may want this
+option if you want to run another hypervisor (including another copy
+of Xen) within a Xen guest or to support a guest Operating System
+which uses hardware virtualisation extensions (e.g. Windows XP
+compatibility mode on more modern Windows OS).
+
+=item B<cpuid="LIBXL_STRING"> or B<cpuid=[ "XEND_STRING", "XEND_STRING" ]>
+
+Configure the value returned when a guest executes CPUID instruction.
+Two versions of config syntax are recognized: libxl and xend.
+
+The libxl syntax is a comma separated list of key=value pairs, preceded by the
+word "host". A few keys take a numerical value, all others take a single
+character which describes what to do with the feature bit.
+
+Possible values for a single feature bit:
+ '1' -> force the corresponding bit to 1
+ '0' -> force to 0
+ 'x' -> Get a safe value (pass through and mask with the default policy)
+ 'k' -> pass through the host bit value
+ 's' -> as 'k' but preserve across save/restore and migration (not implemented)
+
+Note: when specifying B<cpuid> for hypervisor leaves (0x4000xxxx major group)
+only the lowest 8 bits of the 0x4000xx00 leaf's EAX register are
+processed; the rest are ignored (these 8 bits signify the maximum
+number of hypervisor leaves).
+
+List of keys taking a value:
+apicidsize brandid clflush family localapicid maxleaf maxhvleaf model nc
+proccount procpkg stepping
+
+List of keys taking a character:
+3dnow 3dnowext 3dnowprefetch abm acpi aes altmovcr8 apic avx clfsh cmov
+cmplegacy cmpxchg16 cmpxchg8 cntxid dca de ds dscpl dtes64 est extapic f16c
+ffxsr fma4 fpu fxsr htt hypervisor ia64 ibs lahfsahf lm lwp mca mce misalignsse
+mmx mmxext monitor movbe msr mtrr nodeid nx osvw osxsave pae page1gb pat pbe
+pclmulqdq pdcm pge popcnt pse pse36 psn rdtscp skinit smx ss sse sse2 sse3
+sse4_1 sse4_2 sse4a ssse3 svm svm_decode svm_lbrv svm_npt svm_nrips
+svm_pausefilt svm_tscrate svm_vmcbclean syscall sysenter tbm tm tm2 topoext tsc
+vme vmx wdt x2apic xop xsave xtpr
+
+The xend syntax is a list of values in the form of
+'leafnum:register=bitstring,register=bitstring'
+ "leafnum" is the requested function,
+ "register" is the response register to modify
+ "bitstring" represents all bits in the register, its length must be 32 chars.
+ Each successive character represents a lesser-significant bit; possible values
+ are listed above in the libxl section.
+
+Example to hide two features from the guest: 'tm', which is bit #29 in EDX, and
+'pni' (SSE3), which is bit #0 in ECX:
+
+xend: [ '1:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0,edx=xx0xxxxxxxxxxxxxxxxxxxxxxxxxxxxx' ]
+
+libxl: 'host,tm=0,sse3=0'
+
+More info about the CPUID instruction can be found in the processor manuals, and
+in Wikipedia: L<http://en.wikipedia.org/wiki/CPUID>
+
+=item B<acpi_firmware="STRING">
+
+Specify a path to a file that contains extra ACPI firmware tables to pass in to
+a guest. The file can contain several tables in their binary AML form
+concatenated together. Each table self describes its length so no additional
+information is needed. These tables will be added to the ACPI table set in the
+guest. Note that existing tables cannot be overridden by this feature. For
+example this cannot be used to override tables like DSDT, FADT, etc.
+
+=item B<smbios_firmware="STRING">
+
+Specify a path to a file that contains extra SMBIOS firmware structures to pass
+in to a guest. The file can contain a set DMTF predefined structures which will
+override the internal defaults. Not all predefined structures can be overridden,
+only the following types: 0, 1, 2, 3, 11, 22, 39. The file can also contain any
+number of vendor defined SMBIOS structures (type 128 - 255). Since SMBIOS
+structures do not present their overall size, each entry in the file must be
+preceded by a 32-bit integer indicating the size of the next structure.
+
+=item B<ms_vm_genid="OPTION">
+
+Provide a VM generation ID to the guest.
+
+The VM generation ID is a 128-bit random number that a guest may use
+to determine if the guest has been restored from an earlier snapshot
+or cloned.
+
+This is required for Microsoft Windows Server 2012 (and later) domain
+controllers.
+
+Valid options are:
+
+=over 4
+
+=item B<"generate">
+
+Generate a random VM generation ID every time the domain is created or
+restored.
+
+=item B<"none">
+
+Do not provide a VM generation ID.
+
+=back
+
+See also "Virtual Machine Generation ID" by Microsoft
+(http://www.microsoft.com/en-us/download/details.aspx?id=30707).
+
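+For example, to have a fresh generation ID produced every time the
+domain is created or restored:
+
+ ms_vm_genid = "generate"
+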
+=back
+
+=head3 Guest Virtual Time Controls
+
+=over 4
+
+=item B<tsc_mode="MODE">
+
+Specifies how the TSC (Time Stamp Counter) should be provided to the
+guest (X86 only). Specifying this option as a number is
+deprecated. Options are:
+
+=over 4
+
+=item B<"default">
+
+Guest rdtsc/p is executed natively when monotonicity can be guaranteed
+and emulated otherwise (with frequency scaled if necessary).
+
+If an HVM container in B<default> TSC mode is created on a host that
+provides constant host TSC, its guest TSC frequency will be the same
+as the host. If it is later migrated to another host that provides
+constant host TSC and supports Intel VMX TSC scaling/AMD SVM TSC
+ratio, its guest TSC frequency will be the same before and after
+migration, and guest rdtsc/p will be executed natively as well after
+migration.
+
+=item B<"always_emulate">
+
+Guest rdtsc/p is always emulated and the virtual TSC will appear to
+increment (kernel and user) at a fixed 1GHz rate, regardless of the
+PCPU HZ rate or power state. Although there is an overhead associated
+with emulation this will NOT affect underlying CPU performance.
+
+=item B<"native">
+
+Guest rdtsc always executed natively (no monotonicity/frequency
+guarantees); guest rdtscp emulated at native frequency if unsupported
+by h/w, else executed natively.
+
+=item B<"native_paravirt">
+
+Same as B<native>, except xen manages TSC_AUX register so guest can
+determine when a restore/migration has occurred and assumes guest
+obtains/uses pvclock-like mechanism to adjust for monotonicity and
+frequency changes.
+
+If an HVM container in B<native_paravirt> TSC mode can execute both guest
+rdtsc and guest rdtscp natively, then the guest TSC frequency will be
+determined in a similar way to that of B<default> TSC mode.
+
+=back
+
+Please see F<docs/misc/tscmode.txt> for more information on this option.
+
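+For example, to force the fixed-rate emulated TSC described above:
+
+ tsc_mode = "always_emulate"
+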
+=item B<localtime=BOOLEAN>
+
+Set the real time clock to local time or to UTC. False (0) by default,
+i.e. set to UTC.
+
+=item B<rtc_timeoffset=SECONDS>
+
+Set the real time clock offset in seconds. The default is no offset (0).
+
+=item B<vpt_align=BOOLEAN>
+
+Specifies that periodic Virtual Platform Timers should be aligned to
+reduce guest interrupts. Enabling this option can reduce power
+consumption, especially when a guest uses a high timer interrupt
+frequency (HZ) value. The default is true (1).
+
+=item B<timer_mode=MODE>
+
+Specifies the mode for Virtual Timers. The valid values are as follows:
+
+=over 4
+
+=item B<"delay_for_missed_ticks">
+
+Delay for missed ticks. Do not advance a vcpu's time beyond the
+correct delivery time for interrupts that have been missed due to
+preemption. Deliver missed interrupts when the vcpu is rescheduled and
+advance the vcpu's virtual time stepwise for each one.
+
+=item B<"no_delay_for_missed_ticks">
+
+No delay for missed ticks. As above, missed interrupts are delivered,
+but guest time always tracks wallclock (i.e., real) time while doing
+so.
+
+=item B<"no_missed_ticks_pending">
+
+No missed interrupts are held pending. Instead, to ensure ticks are
+delivered at some non-zero rate, if we detect missed ticks then the
+internal tick alarm is not disabled if the VCPU is preempted during
+the next tick period.
+
+=item B<"one_missed_tick_pending">
+
+One missed tick pending. Missed interrupts are collapsed
+together and delivered as one 'late tick'. Guest time always tracks
+wallclock (i.e., real) time.
+
+=back
+
+=back
+
+=head3 Memory layout
+
+=over 4
+
+=item B<mmio_hole=MBYTES>
+
+Specifies the size, in megabytes, of the MMIO hole below 4GiB. Only valid for
+device_model_version = "qemu-xen".
+
+Cannot be smaller than 256. Cannot be larger than 3840.
+
+Known good large value is 3072.
+
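+For example, to request the known good large value:
+
+ mmio_hole = 3072
+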
+=back
+
+=head3 Support for Paravirtualisation of HVM Guests
+
+The following options allow Paravirtualised features (such as devices)
+to be exposed to the guest Operating System in an HVM guest.
+Utilising these features requires specific guest support but when
+available they will result in improved performance.
+
+=over 4
+
+=item B<xen_platform_pci=BOOLEAN>
+
+Enable or disable the Xen platform PCI device. The presence of this
+virtual device enables a guest Operating System (subject to the
+availability of suitable drivers) to make use of paravirtualisation
+features such as disk and network devices etc. Enabling these drivers
+improves performance and is strongly recommended when available. PV
+drivers are available for various Operating Systems including HVM
+Linux L<http://wiki.xen.org/wiki/XenLinuxPVonHVMdrivers> and Microsoft
+Windows L<http://wiki.xen.org/wiki/XenWindowsGplPv>.
+
+Setting B<xen_platform_pci=0> with the default device_model "qemu-xen"
+requires at least QEMU 1.6.
+
+=item B<viridian=[ "GROUP", "GROUP", ...]>
+
+The groups of Microsoft Hyper-V (AKA viridian) compatible enlightenments
+exposed to the guest. The following groups of enlightenments may be
+specified:
+
+=over 4
+
+=item B<base>
+
+This group incorporates the Hypercall MSRs, Virtual processor index MSR,
+and APIC access MSRs. These enlightenments can improve performance of
+Windows Vista and Windows Server 2008 onwards and setting this option
+for such guests is strongly recommended.
+This group is also a pre-requisite for all others. If it is disabled
+then it is an error to attempt to enable any other group.
+
+=item B<freq>
+
+This group incorporates the TSC and APIC frequency MSRs. These
+enlightenments can improve performance of Windows 7 and Windows
+Server 2008 R2 onwards.
+
+=item B<time_ref_count>
+
+This group incorporates Partition Time Reference Counter MSR. This
+enlightenment can improve performance of Windows 8 and Windows
+Server 2012 onwards.
+
+=item B<reference_tsc>
+
+This set incorporates the Partition Reference TSC MSR. This
+enlightenment can improve performance of Windows 7 and Windows
+Server 2008 R2 onwards.
+
+=item B<hcall_remote_tlb_flush>
+
+This set incorporates use of hypercalls for remote TLB flushing.
+This enlightenment may improve performance of Windows guests running
+on hosts with higher levels of (physical) CPU contention.
+
+=item B<apic_assist>
+
+This set incorporates use of the APIC assist page to avoid EOI of
+the local APIC.
+This enlightenment may improve performance of guests that make use of
+per-vcpu event channel upcall vectors.
+Note that this enlightenment will have no effect if the guest is
+using APICv posted interrupts.
+
+=item B<defaults>
+
+This is a special value that enables the default set of groups, which
+is currently the B<base>, B<freq>, B<time_ref_count> and B<apic_assist>
+groups.
+
+=item B<all>
+
+This is a special value that enables all available groups.
+
+=back
+
+Groups can be disabled by prefixing the name with '!'. So, for example,
+to enable all groups except B<freq>, specify:
+
+=over 4
+
+B<viridian=[ "all", "!freq" ]>
+
+=back
+
+For details of the enlightenments see the latest version of Microsoft's
+Hypervisor Top-Level Functional Specification.
+
+The enlightenments should be harmless for other versions of Windows
+(although they will not give any benefit) and the majority of other
+non-Windows OSes.
+However it is known that they are incompatible with some other Operating
+Systems and in some circumstances can prevent Xen's own paravirtualisation
+interfaces for HVM guests from being used.
+
+The viridian option can be specified as a boolean. A value of true (1)
+is equivalent to the list [ "defaults" ], and a value of false (0) is
+equivalent to an empty list.
+
+=back
+
+=head3 Emulated VGA Graphics Device
+
+The following options control the features of the emulated graphics
+device. Many of these options behave similarly to the equivalent key
+in the B<VFB_SPEC_STRING> for configuring virtual frame buffer devices
+(see above).
+
+=over 4
+
+=item B<videoram=MBYTES>
+
+Sets the amount of RAM which the emulated video card will contain,
+which in turn limits the resolutions and bit depths which will be
+available.
+
+When using the qemu-xen-traditional device-model, the default as well as
+minimum amount of video RAM for stdvga is 8 MB, which is sufficient for e.g.
+1600x1200 at 32bpp. For the upstream qemu-xen device-model, the default and
+minimum are 16 MB.
+
+When using the emulated Cirrus graphics card (B<vga="cirrus">) and the
+qemu-xen-traditional device-model, the amount of video RAM is fixed at 4 MB,
+which is sufficient for 1024x768 at 32 bpp. For the upstream qemu-xen
+device-model, the default and minimum are 8 MB.
+
+For B<qxl> vga, both the default and minimum are 128MB.
+If B<videoram> is set to less than 128MB, an error will be triggered.
+
+=item B<stdvga=BOOLEAN>
+
+Select a standard VGA card with VBE (VESA BIOS Extensions) as the
+emulated graphics device. The default is false (0) which means to emulate
+a Cirrus Logic GD5446 VGA card. If your guest supports VBE 2.0 or
+later (e.g. Windows XP onwards) then you should enable this.
+stdvga supports more video ram and bigger resolutions than Cirrus.
+This option is deprecated, use vga="stdvga" instead.
+
+=item B<vga="STRING">
+
+Selects the emulated video card (none|stdvga|cirrus|qxl).
+The default is cirrus.
+
+In general, QXL should work with the Spice remote display protocol
+for acceleration, and a QXL driver is necessary in the guest in this case.
+QXL can also work with the VNC protocol, but it will be like a standard
+VGA without acceleration.
+
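+For example, a sketch selecting the standard VGA card with more than
+the minimum video RAM:
+
+ vga = "stdvga"
+ videoram = 32
+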
+=item B<vnc=BOOLEAN>
+
+Allow access to the display via the VNC protocol. This enables the
+other VNC-related settings. The default is to enable this.
+
+=item B<vnclisten="ADDRESS[:DISPLAYNUM]">
+
+Specifies the IP address, and optionally VNC display number, to use.
+
+=item B<vncdisplay=DISPLAYNUM>
+
+Specifies the VNC display number to use. The actual TCP port number
+will be DISPLAYNUM+5900.
+
+=item B<vncunused=BOOLEAN>
+
+Requests that the VNC display setup search for a free TCP port to use.
+The actual display used can be accessed with C<xl vncviewer>.
+
+=item B<vncpasswd="PASSWORD">
+
+Specifies the password for the VNC server.
+
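+For example, a sketch exporting the display on VNC display 1 on all
+host interfaces (the password is purely illustrative):
+
+ vnc = 1
+ vnclisten = "0.0.0.0:1"
+ vncpasswd = "s3cret"
+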
+=item B<keymap="LANG">
+
+Configure the keymap to use for the keyboard associated with this
+display. If the input method does not easily support raw keycodes
+(e.g. this is often the case when using VNC) then this allows us to
+correctly map the input keys into keycodes seen by the guest. The
+specific values which are accepted are defined by the version of the
+device-model which you are using. See L</"Keymaps"> below or consult the
+L<qemu(1)> manpage. The default is B<en-us>.
+
+=item B<sdl=BOOLEAN>
+
+Specifies that the display should be presented via an X window (using
+Simple DirectMedia Layer). The default is not to enable this mode.
+
+=item B<opengl=BOOLEAN>
+
+Enable OpenGL acceleration of the SDL display. Only affects machines
+using B<device_model_version="qemu-xen-traditional"> and only if the
+device-model was compiled with OpenGL support. False (0) by default.
+
+=item B<nographic=BOOLEAN>
+
+Enable or disable the virtual graphics device. The default is to
+provide a VGA graphics device but this option can be used to disable
+it.
+
+=back
+
+=head3 Spice Graphics Support
+
+The following options control the features of SPICE.
+
+=over 4
+
+=item B<spice=BOOLEAN>
+
+Allow access to the display via the SPICE protocol. This enables the
+other SPICE-related settings.
+
+=item B<spicehost="ADDRESS">
+
+Specify the interface address to listen on if given, otherwise any
+interface.
+
+=item B<spiceport=NUMBER>
+
+Specify the port on which the SPICE server should listen if SPICE is
+enabled.
+
+=item B<spicetls_port=NUMBER>
+
+Specify the secure port on which the SPICE server should listen if
+SPICE is enabled. At least one of spiceport or spicetls_port must be
+given if SPICE is enabled. Note that options depending on
+spicetls_port are not yet supported.
+
+=item B<spicedisable_ticketing=BOOLEAN>
+
+Enable client connections without a password. When disabled, spicepasswd
+must be set. The default is false (0).
+
+=item B<spicepasswd="PASSWORD">
+
+Specify the ticket password which is used by a client for connection.
+
+=item B<spiceagent_mouse=BOOLEAN>
+
+Whether the SPICE agent is used for client mouse mode. The default is
+true (1).
+
+=item B<spicevdagent=BOOLEAN>
+
+Enables spice vdagent. The Spice vdagent is an optional component for
+enhancing user experience and performing guest-oriented management
+tasks. Its features include: client mouse mode (no need for the client
+to grab the mouse, no mouse lag), automatic adjustment of screen
+resolution, and copy and paste (text and image) between client and
+domU. It also requires the vdagent service to be installed in the domU
+OS in order to work. The default is false (0).
+
+=item B<spice_clipboard_sharing=BOOLEAN>
+
+Enables Spice clipboard sharing (copy/paste). It requires spicevdagent
+to be enabled. The default is false (0).
+
+=item B<spiceusbredirection=NUMBER>
+
+Enables spice usbredirection. Creates NUMBER usbredirection channels
+for redirection of up to 4 usb devices from the spice client to the
+domU's qemu. It requires a usb controller; if one is not defined, a
+usb2 controller will be added automatically. The default is disabled (0).
+
+=item B<spice_image_compression=[auto_glz|auto_lz|quic|glz|lz|off]>
+
+Specifies what image compression is to be used by spice (if given), otherwise
+the qemu default will be used. Please see the documentation of your
+current qemu version for details.
+
+=item B<spice_streaming_video=[filter|all|off]>
+
+Specifies what streaming video setting is to be used by spice (if given),
+otherwise the qemu default will be used.
+
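+For example, a minimal sketch enabling SPICE without ticketing (the
+port number is purely illustrative):
+
+ spice = 1
+ spicehost = "0.0.0.0"
+ spiceport = 6000
+ spicedisable_ticketing = 1
+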
+=back
+
+=head3 Miscellaneous Emulated Hardware
+
+=over 4
+
+=item B<serial=[ "DEVICE", "DEVICE", ...]>
+
+Redirect virtual serial ports to B<DEVICE>s. Please see the
+B<-serial> option in the L<qemu(1)> manpage for details of the valid
+B<DEVICE> options. Default is B<vc> when in graphical mode and
+B<stdio> if B<nographic=1> is used.
+
+The form serial=DEVICE is also accepted for backwards compatibility.
+
+=item B<soundhw=DEVICE>
+
+Select the virtual sound card to expose to the guest. The valid
+devices are defined by the device model configuration, please see the
+L<qemu(1)> manpage for details. The default is not to export any sound
+device.
+
+=item B<usb=BOOLEAN>
+
+Enables or disables an emulated USB bus in the guest.
+
+=item B<usbversion=NUMBER>
+
+Specifies the type of an emulated USB bus in the guest. 1 for usb1,
+2 for usb2 and 3 for usb3. It is available only with upstream qemu.
+Due to implementation limitations this is not compatible with the usb
+and usbdevice parameters.
+Default is 0 (no usb controller defined).
+
+=item B<usbdevice=[ "DEVICE", "DEVICE", ...]>
+
+Adds B<DEVICE>s to the emulated USB bus. The USB bus must also be
+enabled using B<usb=1>. The most common use for this option is
+B<usbdevice=['tablet']> which adds a pointer device using absolute
+coordinates. Such devices function better than relative coordinate
+devices (such as a standard mouse) since many methods of exporting
+guest graphics (such as VNC) work better in this mode. Note that this
+is independent of the actual pointer device you are using on the
+host/client side.
+
+Host devices can also be passed through in this way, by specifying
+host:USBID, where USBID is of the form xxxx:yyyy. The USBID can
+typically be found by using lsusb or usb-devices.
+
+If you wish to use the "host:bus.addr" format, remove any leading '0' from the
+bus and addr. For example, for the USB device on bus 008 dev 002, you should
+write "host:8.2".
+
+The form usbdevice=DEVICE is also accepted for backwards compatibility.
+
+More valid options can be found in the "usbdevice" section of the qemu
+documentation.
+
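+For example, a sketch adding a tablet and passing through the host
+device from the example above:
+
+ usb = 1
+ usbdevice = [ "tablet", "host:8.2" ]
+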
+=item B<vendor_device="VENDOR_DEVICE">
+
+Selects which variant of the QEMU xen-pvdevice should be used for this
+guest. Valid values are:
+
+=over 4
+
+=item B<none>
+
+The xen-pvdevice should be omitted. This is the default.
+
+=item B<xenserver>
+
+The xenserver variant of the xen-pvdevice (device-id=C000) will be
+specified, enabling the use of XenServer PV drivers in the guest.
+
+=back
+
+This parameter only takes effect when device_model_version=qemu-xen.
+See F<docs/misc/pci-device-reservations.txt> for more information.
+
+=back
+
+=head2 Device-Model Options
+
+The following options control the selection of the device-model. This
+is the component which provides emulation of the virtual devices to an
+HVM guest. For a PV guest a device-model is sometimes used to provide
+backends for certain PV devices (most usually a virtual framebuffer
+device).
+
+=over 4
+
+=item B<device_model_version="DEVICE-MODEL">
+
+Selects which variant of the device-model should be used for this
+guest. Valid values are:
+
+=over 4
+
+=item B<qemu-xen>
+
+Use the device-model merged into the upstream QEMU project.
+This device-model is the default for Linux dom0.
+
+=item B<qemu-xen-traditional>
+
+Use the device-model based upon the historical Xen fork of Qemu.
+This device-model is still the default for NetBSD dom0.
+
+=item B<none>
+
+Don't use any device model. This requires a kernel capable of booting
+without emulated devices.
+
+=back
+
+It is recommended to accept the default value for new guests. If
+you have existing guests then, depending on the nature of the guest
+Operating System, you may wish to force them to use the device
+model which they were installed with.
+
+=item B<device_model_override="PATH">
+
+Override the path to the binary to be used as the device-model. The
+binary provided here MUST be consistent with the
+`device_model_version` which you have specified. You should not
+normally need to specify this option.
+
+=item B<device_model_stubdomain_override=BOOLEAN>
+
+Override the use of stubdomain based device-model. Normally this will
+be automatically selected based upon the other features and options
+you have selected.
+
+=item B<device_model_stubdomain_seclabel="LABEL">
+
+Assign an XSM security label to the device-model stubdomain.
+
+=item B<device_model_args=[ "ARG", "ARG", ...]>
+
+Pass additional arbitrary options on the device-model command
+line. Each element in the list is passed as an option to the
+device-model.
+
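+For example, a sketch passing a standard QEMU option through to the
+device-model (B<-monitor pty> is an upstream QEMU option, not an xl
+one):
+
+ device_model_args = [ "-monitor", "pty" ]
+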
+=item B<device_model_args_pv=[ "ARG", "ARG", ...]>
+
+Pass additional arbitrary options on the device-model command line for
+a PV device model only. Each element in the list is passed as an
+option to the device-model.
+
+=item B<device_model_args_hvm=[ "ARG", "ARG", ...]>
+
+Pass additional arbitrary options on the device-model command line for
+an HVM device model only. Each element in the list is passed as an
+option to the device-model.
+
+=back
+
+=head2 Keymaps
+
+The keymaps available are defined by the device-model which you are
+using. Commonly this includes:
+
+ ar de-ch es fo fr-ca hu ja mk no pt-br sv
+ da en-gb et fr fr-ch is lt nl pl ru th
+ de en-us fi fr-be hr it lv nl-be pt sl tr
+
+The default is B<en-us>.
+
+See L<qemu(1)> for more information.
+
+=head2 Architecture Specific options
+
+=head3 ARM
+
+=over 4
+
+=item B<gic_version="vN">
+
+Version of the GIC emulated for the guest. Currently, the following
+versions are supported:
+
+=over 4
+
+=item B<v2>
+
+Emulate a GICv2
+
+=item B<v3>
+
+Emulate a GICv3. Note that the emulated GIC does not support the
+GICv2 compatibility mode.
+
+=item B<default>
+
+Emulate the same version as the native GIC hardware used by the host
+where the domain was created.
+
+=back
+
+This requires hardware compatibility with the requested version,
+either natively or via hardware backwards compatibility support.
+
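+For example, to request a GICv3 for the guest:
+
+ gic_version = "v3"
+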
+=back
+
+=head1 SEE ALSO
+
+=over 4
+
+=item L<xl(1)>
+
+=item L<xlcpupool.cfg(5)>
+
+=item F<xl-disk-configuration>
+
+=item F<xl-network-configuration>
+
+=item F<docs/misc/tscmode.txt>
+
+=back
+
+=head1 FILES
+
+F</etc/xen/NAME.cfg>
+F<@XEN_DUMP_DIR@/NAME>
+
+=head1 BUGS
+
+This document may contain items which require further
+documentation. Patches to improve incomplete items (or any other item)
+are gratefully received on the xen-devel@lists.xen.org mailing
+list. Please see L<http://wiki.xen.org/wiki/SubmittingXenPatches> for
+information on how to submit a patch to Xen.
+
+++ /dev/null
-=head1 NAME
-
-XL - Xen management tool, based on LibXenlight
-
-=head1 SYNOPSIS
-
-B<xl> I<subcommand> [I<args>]
-
-=head1 DESCRIPTION
-
-The B<xl> program is the new tool for managing Xen guest
-domains. The program can be used to create, pause, and shutdown
-domains. It can also be used to list current domains, enable or pin
-VCPUs, and attach or detach virtual block devices.
-
-The basic structure of every B<xl> command is almost always:
-
-=over 2
-
-B<xl> I<subcommand> [I<OPTIONS>] I<domain-id>
-
-=back
-
-Where I<subcommand> is one of the subcommands listed below, I<domain-id>
-is the numeric domain id, or the domain name (which will be internally
-translated to domain id), and I<OPTIONS> are subcommand specific
-options. There are a few exceptions to this rule in the cases where
-the subcommand in question acts on all domains, the entire machine,
-or directly on the Xen hypervisor. Those exceptions will be clear for
-each of those subcommands.
-
-=head1 NOTES
-
-=over 4
-
-=item start the script B</etc/init.d/xencommons> at boot time
-
-Most B<xl> operations rely upon B<xenstored> and B<xenconsoled>: make
-sure you start the script B</etc/init.d/xencommons> at boot time to
-initialize all the daemons needed by B<xl>.
-
-=item setup a B<xenbr0> bridge in dom0
-
-In the most common network configuration, you need to setup a bridge in dom0
-named B<xenbr0> in order to have a working network in the guest domains.
-Please refer to the documentation of your Linux distribution to know how to
-setup the bridge.
-
-=item B<autoballoon>
-
-If you specify the amount of memory dom0 has, passing B<dom0_mem> to
-Xen, it is highly recommended to disable B<autoballoon>. Edit
-B</etc/xen/xl.conf> and set it to 0.
-
-=item run xl as B<root>
-
-Most B<xl> commands require root privileges to run due to the
-communications channels used to talk to the hypervisor. Running as
-non root will return an error.
-
-=back
-
-=head1 GLOBAL OPTIONS
-
-Some global options are always available:
-
-=over 4
-
-=item B<-v>
-
-Verbose.
-
-=item B<-N>
-
-Dry run: do not actually execute the command.
-
-=item B<-f>
-
-Force execution: xl will refuse to run some commands if it detects that xend is
-also running, this option will force the execution of those commands, even
-though it is unsafe.
-
-=item B<-t>
-
-Always use carriage-return-based overwriting for printing progress
-messages without scrolling the screen. Without -t, this is done only
-if stderr is a tty.
-
-=back
-
-=head1 DOMAIN SUBCOMMANDS
-
-The following subcommands manipulate domains directly. As stated
-previously, most commands take I<domain-id> as the first parameter.
-
-=over 4
-
-=item B<button-press> I<domain-id> I<button>
-
-I<This command is deprecated. Please use C<xl trigger> in preference>
-
-Indicate an ACPI button press to the domain. I<button> may be 'power' or
-'sleep'. This command is only available for HVM domains.
-
-=item B<create> [I<configfile>] [I<OPTIONS>]
-
-The create subcommand takes a config file as first argument: see
-L<xl.cfg> for full details of that file format and possible options.
-If I<configfile> is missing B<XL> creates the domain starting from the
-default value for every option.
-
-I<configfile> has to be an absolute path to a file.
-
-Create will return B<as soon> as the domain is started. This B<does
-not> mean the guest OS in the domain has actually booted, or is
-available for input.
-
-If the I<-F> option is specified, create will start the domain and not
-return until its death.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-q>, B<--quiet>
-
-No console output.
-
-=item B<-f=FILE>, B<--defconfig=FILE>
-
-Use the given configuration file.
-
-=item B<-p>
-
-Leave the domain paused after it is created.
-
-=item B<-F>
-
-Run in foreground until death of the domain.
-
-=item B<-V>, B<--vncviewer>
-
-Attach to domain's VNC server, forking a vncviewer process.
-
-=item B<-A>, B<--vncviewer-autopass>
-
-Pass VNC password to vncviewer via stdin.
-
-=item B<-c>
-
-Attach console to the domain as soon as it has started. This is
-useful for determining issues with crashing domains and just as a
-general convenience since you often want to watch the
-domain boot.
-
-=item B<key=value>
-
-It is possible to pass I<key=value> pairs on the command line to provide
-options as if they were written in the configuration file; these override
-whatever is in the I<configfile>.
-
-NB: Many config options require characters such as quotes or brackets
-which are interpreted by the shell (and often discarded) before being
-passed to xl, resulting in xl being unable to parse the value
-correctly. A simple work-around is to put all extra options within a
-single set of quotes, separated by semicolons. (See below for an example.)
-
-=back
-
-B<EXAMPLES>
-
-=over 4
-
-=item I<with config file>
-
- xl create DebianLenny
-
-This creates a domain with the file /etc/xen/DebianLenny, and returns as
-soon as it is run.
-
-=item I<with extra parameters>
-
- xl create hvm.cfg 'cpus="0-3"; pci=["01:05.1","01:05.2"]'
-
-This creates a domain with the file hvm.cfg, but additionally pins it to
-cpus 0-3, and passes through two PCI devices.
-
-=back
-
-=item B<config-update> B<domid> [I<configfile>] [I<OPTIONS>]
-
-Update the saved configuration for a running domain. This has no
-immediate effect but will be applied when the guest is next
-restarted. This command is useful to ensure that runtime modifications
-made to the guest will be preserved when the guest is restarted.
-
-Since Xen 4.5 xl has improved capabilities to handle dynamic domain
-configuration changes and will preserve any changes made at runtime
-when necessary. Therefore it should not normally be necessary to use
-this command any more.
-
-I<configfile> has to be an absolute path to a file.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-f=FILE>, B<--defconfig=FILE>
-
-Use the given configuration file.
-
-=item B<key=value>
-
-It is possible to pass I<key=value> pairs on the command line to
-provide options as if they were written in the configuration file;
-these override whatever is in the I<configfile>. Please see the note
-under I<create> on handling special characters when passing
-I<key=value> pairs on the command line.
-
-=back
-
-=item B<console> [I<OPTIONS>] I<domain-id>
-
-Attach to domain I<domain-id>'s console. If you've set up your domains to
-have a traditional login console this will look much like a normal
-text login screen.
-
-Use the key combination Ctrl+] to detach the domain console.
-
-B<OPTIONS>
-
-=over 4
-
-=item I<-t [pv|serial]>
-
-Connect to a PV console or connect to an emulated serial console.
-PV consoles are the only consoles available for PV domains while HVM
-domains can have both. If this option is not specified it defaults to
-emulated serial for HVM guests and PV console for PV guests.
-
-=item I<-n NUM>
-
-Connect to console number I<NUM>. Console numbers start from 0.
-
-=back
-
-=item B<destroy> [I<OPTIONS>] I<domain-id>
-
-Immediately terminate the domain I<domain-id>. This doesn't give the
-domain OS any chance to react, and is the equivalent of ripping the
-power cord out on a physical machine. In most cases you will want to
-use the B<shutdown> command instead.
-
-B<OPTIONS>
-
-=over 4
-
-=item I<-f>
-
-Allow domain 0 to be destroyed. Because a domain cannot destroy itself, this is
-only possible when using a disaggregated toolstack, and is most useful when
-using a hardware domain separated from domain 0.
-
-=back
-
-=item B<domid> I<domain-name>
-
-Converts a domain name to a domain id.
-
-=item B<domname> I<domain-id>
-
-Converts a domain id to a domain name.
-
-=item B<rename> I<domain-id> I<new-name>
-
-Change the domain name of I<domain-id> to I<new-name>.
-
-=item B<dump-core> I<domain-id> [I<filename>]
-
-Dumps the virtual machine's memory for the specified domain to the
-I<filename> specified, without pausing the domain. The dump file will
-be written to a distribution specific directory for dump files, such
-as /var/lib/xen/dump.
-
-=item B<help> [I<--long>]
-
-Displays the short help message (i.e. common commands).
-
-The I<--long> option prints out the complete set of B<xl> subcommands,
-grouped by function.
-
-=item B<list> [I<OPTIONS>] [I<domain-id> ...]
-
-Prints information about one or more domains. If no domains are
-specified it prints out information about all domains.
-
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-l>, B<--long>
-
-The output for B<xl list> is not the table view shown below, but
-instead presents the data as a JSON data structure.
-
-=item B<-Z>, B<--context>
-
-Also prints the security labels.
-
-=item B<-v>, B<--verbose>
-
-Also prints the domain UUIDs, the shutdown reason and security labels.
-
-=item B<-c>, B<--cpupool>
-
-Also prints the cpupool the domain belongs to.
-
-=item B<-n>, B<--numa>
-
-Also prints the domain NUMA node affinity.
-
-=back
-
-B<EXAMPLE>
-
-An example format for the list is as follows:
-
- Name ID Mem VCPUs State Time(s)
- Domain-0 0 750 4 r----- 11794.3
- win 1 1019 1 r----- 0.3
- linux 2 2048 2 r----- 5624.2
-
-Name is the name of the domain. ID the numeric domain id. Mem is the
-desired amount of memory to allocate to the domain (although it may
-not be the currently allocated amount). VCPUs is the number of
-virtual CPUs allocated to the domain. State is the run state (see
-below). Time is the total run time of the domain as accounted for by
-Xen.
-
-B<STATES>
-
-The State field lists 6 states for a Xen domain, and which ones the
-current domain is in.
-
-=over 4
-
-=item B<r - running>
-
-The domain is currently running on a CPU.
-
-=item B<b - blocked>
-
-The domain is blocked, and not running or runnable. This can be
-because the domain is waiting on IO (a traditional wait state) or has
-gone to sleep because there was nothing else for it to do.
-
-=item B<p - paused>
-
-The domain has been paused, usually occurring through the administrator
-running B<xl pause>. When in a paused state the domain will still
-consume allocated resources like memory, but will not be eligible for
-scheduling by the Xen hypervisor.
-
-=item B<s - shutdown>
-
-The guest OS has shut down (SCHEDOP_shutdown has been called) but the
-domain is not dying yet.
-
-=item B<c - crashed>
-
-The domain has crashed, which is always a violent ending. Usually
-this state can only occur if the domain has been configured not to
-restart on crash. See L<xl.cfg(5)> for more info.
-
-=item B<d - dying>
-
-The domain is in the process of dying, but hasn't completely shut down
-or crashed.
-
-=back
-
-B<NOTES>
-
-=over 4
-
-The Time column is deceptive. Virtual IO (network and block devices)
-used by domains requires coordination by Domain0, which means that
-Domain0 is actually charged for much of the time that a DomainU is
-doing IO. Use of this time value to determine relative utilizations
-by domains is thus very suspect, as a high IO workload may show as
-less utilized than a high CPU workload. Consider yourself warned.
-
-=back
-
-=item B<mem-max> I<domain-id> I<mem>
-
-Specify the maximum amount of memory the domain is able to use, appending 't'
-for terabytes, 'g' for gigabytes, 'm' for megabytes, 'k' for kilobytes and 'b'
-for bytes.
-
-The mem-max value may not correspond to the actual memory used in the
-domain, as it may balloon down its memory to give more back to the OS.
-
-=item B<mem-set> I<domain-id> I<mem>
-
-Set the domain's used memory using the balloon driver; append 't' for
-terabytes, 'g' for gigabytes, 'm' for megabytes, 'k' for kilobytes and 'b' for
-bytes.
-
-Because this operation requires cooperation from the domain operating
-system, there is no guarantee that it will succeed. This command will
-definitely not work unless the domain has the required paravirt
-driver.
-
-B<Warning:> There is no good way to know in advance how small of a
-mem-set will make a domain unstable and cause it to crash. Be very
-careful when using this command on running domains.
-
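-For example, to balloon a (hypothetical) domain named "myguest" down
-to 2 gigabytes:
-
- xl mem-set myguest 2048m
-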
-=item B<migrate> [I<OPTIONS>] I<domain-id> I<host>
-
-Migrate a domain to another host machine. By default B<xl> relies on ssh as a
-transport mechanism between the two hosts.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-s> I<sshcommand>
-
-Use <sshcommand> instead of ssh. The string will be passed to sh. If empty, run
-<host> instead of ssh <host> xl migrate-receive [-d -e].
-
-=item B<-e>
-
-On the new host, do not wait in the background (on <host>) for the death of the
-domain. See the corresponding option of the I<create> subcommand.
-
-=item B<-C> I<config>
-
-Send <config> instead of config file from creation.
-
-=item B<--debug>
-
-Print huge (!) amount of debug during the migration process.
-
-=back
-
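-For example, to migrate a (hypothetical) domain to a host reachable
-over ssh:
-
- xl migrate myguest host2.example.com
-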
-=item B<remus> [I<OPTIONS>] I<domain-id> I<host>
-
-Enable Remus HA or COLO HA for domain. By default B<xl> relies on ssh as a
-transport mechanism between the two hosts.
-
-B<NOTES>
-
-=over 4
-
-Remus support in xl is still in an experimental (proof-of-concept) phase.
-Disk replication support is limited to DRBD disks.
-
-COLO support in xl is still in an experimental (proof-of-concept)
-phase. All options are subject to change in the future.
-
-=back
-
-COLO disk configuration looks like:
-
- disk = ['...,colo,colo-host=xxx,colo-port=xxx,colo-export=xxx,active-disk=xxx,hidden-disk=xxx...']
-
-The supported options are:
-
-=over 4
-
-=item B<colo-host> :Secondary host's ip address.
-
-=item B<colo-port> :Secondary host's port. We will run an nbd server on the
-secondary host, and the nbd server will listen on this port.
-
-=item B<colo-export> :Nbd server's disk export name of secondary host.
-
-=item B<active-disk> :The secondary guest's writes will be buffered in this
-disk, and it is used by the secondary.
-
-=item B<hidden-disk> :The primary's modified contents will be buffered in
-this disk, and it is used by the secondary.
-
-=back
-
-COLO network configuration looks like:
-
- vif = [ '...,forwarddev=xxx,...']
-
-The supported options are:
-
-=over 4
-
-=item B<forwarddev> :Forward devices for the primary and secondary; they
-are directly connected.
-
-
-=back
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-i> I<MS>
-
-Checkpoint domain memory every MS milliseconds (default 200ms).
-
-=item B<-u>
-
-Disable memory checkpoint compression.
-
-=item B<-s> I<sshcommand>
-
-Use <sshcommand> instead of ssh. The string will be passed to sh.
-If empty, run <host> instead of ssh <host> xl migrate-receive -r [-e].
-
-=item B<-e>
-
-On the new host, do not wait in the background (on <host>) for the death
-of the domain. See the corresponding option of the I<create> subcommand.
-
-=item B<-N> I<netbufscript>
-
-Use <netbufscript> to setup network buffering instead of the
-default script (/etc/xen/scripts/remus-netbuf-setup).
-
-=item B<-F>
-
-Run Remus in unsafe mode. Use this option with caution as failover may
-not work as intended.
-
-=item B<-b>
-
-Replicate memory checkpoints to /dev/null (blackhole).
-Generally useful for debugging. Requires enabling unsafe mode.
-
-=item B<-n>
-
-Disable network output buffering. Requires enabling unsafe mode.
-
-=item B<-d>
-
-Disable disk replication. Requires enabling unsafe mode.
-
-=item B<-c>
-
-Enable COLO HA. This conflicts with B<-i> and B<-b>, and memory
-checkpoint compression must be disabled.
-
-=back
-
-=item B<pause> I<domain-id>
-
-Pause a domain. When in a paused state the domain will still consume
-allocated resources such as memory, but will not be eligible for
-scheduling by the Xen hypervisor.
-
-=item B<reboot> [I<OPTIONS>] I<domain-id>
-
-Reboot a domain. This acts just as if the domain had the B<reboot>
-command run from the console. The command returns as soon as it has
-executed the reboot action, which may be significantly before the
-domain actually reboots.
-
-For HVM domains this requires PV drivers to be installed in your guest
-OS. If PV drivers are not present but you have configured the guest OS
-to behave appropriately you may be able to use the I<-F> option to
-trigger a reset button press.
-
-The behavior of what happens to a domain when it reboots is set by the
-B<on_reboot> parameter of the domain configuration file when the
-domain was created.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-F>
-
-If the guest does not support PV reboot control then fallback to
-sending an ACPI power event (equivalent to the I<reset> option to
-I<trigger>).
-
-You should ensure that the guest is configured to behave as expected
-in response to this event.
-
-=back
-
-=item B<restore> [I<OPTIONS>] [I<ConfigFile>] I<CheckpointFile>
-
-Build a domain from an B<xl save> state file. See B<save> for more info.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-p>
-
-Do not unpause domain after restoring it.
-
-=item B<-e>
-
-Do not wait in the background for the death of the domain on the new host.
-See the corresponding option of the I<create> subcommand.
-
-=item B<-d>
-
-Enable debug messages.
-
-=item B<-V>, B<--vncviewer>
-
-Attach to domain's VNC server, forking a vncviewer process.
-
-=item B<-A>, B<--vncviewer-autopass>
-
-Pass VNC password to vncviewer via stdin.
-
-
-
-=back
-
-=item B<save> [I<OPTIONS>] I<domain-id> I<CheckpointFile> [I<ConfigFile>]
-
-Saves a running domain to a state file so that it can be restored
-later. Once saved, the domain will no longer be running on the
-system, unless the -c or -p options are used.
-B<xl restore> restores from this checkpoint file.
-Passing a config file argument allows the user to manually select the VM config
-file used to create the domain.
-
-=over 4
-
-=item B<-c>
-
-Leave domain running after creating the snapshot.
-
-=item B<-p>
-
-Leave domain paused after creating the snapshot.
-
-=back
-
-=item B<sharing> [I<domain-id>]
-
-List count of shared pages.
-
-B<OPTIONS>
-
-=over 4
-
-=item I<domain_id>
-
-List specifically for that domain. Otherwise, list for all domains.
-
-=back
-
-=item B<shutdown> [I<OPTIONS>] I<-a|domain-id>
-
-Gracefully shuts down a domain. This coordinates with the domain OS
-to perform graceful shutdown, so there is no guarantee that it will
-succeed, and may take a variable length of time depending on what
-services must be shut down in the domain.
-
-For HVM domains this requires PV drivers to be installed in your guest
-OS. If PV drivers are not present but you have configured the guest OS
-to behave appropriately you may be able to use the I<-F> option to
-trigger a power button press.
-
-The command returns immediately after signalling the domain unless the
-B<-w> flag is used.
-
-The behavior of what happens to a domain when it shuts down is set by the
-B<on_shutdown> parameter of the domain configuration file when the
-domain was created.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-a>, B<--all>
-
-Shutdown all guest domains. Often used when doing a complete shutdown
-of a Xen system.
-
-=item B<-w>, B<--wait>
-
-Wait for the domain to complete shutdown before returning.
-
-=item B<-F>
-
-If the guest does not support PV shutdown control then fallback to
-sending an ACPI power event (equivalent to the I<power> option to
-I<trigger>).
-
-You should ensure that the guest is configured to behave as expected
-in response to this event.
-
-=back
-
-=item B<sysrq> I<domain-id> I<letter>
-
-Send a <Magic System Request> to the domain, each type of request is
-represented by a different letter.
-It can be used to send SysRq requests to Linux guests, see sysrq.txt in
-your Linux Kernel sources for more information.
-It requires PV drivers to be installed in your guest OS.
-
-=item B<trigger> I<domain-id> I<nmi|reset|init|power|sleep|s3resume> [I<VCPU>]
-
-Send a trigger to a domain, where the trigger can be: nmi, reset, init, power
-or sleep. Optionally a specific vcpu number can be passed as an argument.
-This command is only available for HVM domains.
-
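-For example, to inject an NMI into a (hypothetical) domain:
-
- xl trigger myguest nmi
-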
-=item B<unpause> I<domain-id>
-
-Moves a domain out of the paused state. This will allow a previously
-paused domain to now be eligible for scheduling by the Xen hypervisor.
-
-=item B<vcpu-set> I<domain-id> I<vcpu-count>
-
-Enables the I<vcpu-count> virtual CPUs for the domain in question.
-Like mem-set, this command can only allocate up to the maximum virtual
-CPU count configured at boot for the domain.
-
-If the I<vcpu-count> is smaller than the current number of active
-VCPUs, the highest-numbered VCPUs will be hotplug removed. This may be
-important for pinning purposes.
-
-Attempting to set the VCPUs to a number larger than the initially
-configured VCPU count is an error. Trying to set VCPUs to < 1 will be
-quietly ignored.
-
-Some guests may need to actually bring the newly added CPU online
-after B<vcpu-set>; see the B<SEE ALSO> section for information.
-
-=item B<vcpu-list> [I<domain-id>]
-
-Lists VCPU information for a specific domain. If no domain is
-specified, VCPU information for all domains will be provided.
-
-=item B<vcpu-pin> [I<-f|--force>] I<domain-id> I<vcpu> I<cpus hard> I<cpus soft>
-
-Set hard and soft affinity for a I<vcpu> of <domain-id>. Normally VCPUs
-can float between available CPUs whenever Xen deems a different run state
-is appropriate.
-
-Hard affinity can be used to restrict this, by ensuring certain VCPUs
-can only run on certain physical CPUs. Soft affinity specifies a I<preferred>
-set of CPUs. Soft affinity needs special support in the scheduler, which is
-only provided in credit1.
-
-The keyword B<all> can be used to apply the hard and soft affinity masks to
-all the VCPUs in the domain. The symbol '-' can be used to leave either
-hard or soft affinity alone.
-
-For example:
-
- xl vcpu-pin 0 3 - 6-9
-
-will set soft affinity for vCPU 3 of domain 0 to pCPUs 6,7,8 and 9,
-leaving its hard affinity untouched. On the other hand:
-
- xl vcpu-pin 0 3 3,4 6-9
-
-will set both hard and soft affinity, the former to pCPUs 3 and 4, the
-latter to pCPUs 6,7,8, and 9.
-
-Specifying I<-f> or I<--force> will remove a temporary pinning done by the
-operating system (normally this should be done by the operating system).
-In case a temporary pinning is active for a vcpu the affinity of this vcpu
-can't be changed without this option.
-
-=item B<vm-list>
-
-Prints information about guests. This list excludes information about
-service or auxiliary domains such as dom0 and stubdoms.
-
-B<EXAMPLE>
-
-An example format for the list is as follows:
-
- UUID ID name
- 59e1cf6c-6ab9-4879-90e7-adc8d1c63bf5 2 win
- 50bc8f75-81d0-4d53-b2e6-95cb44e2682e 3 linux
-
-=item B<vncviewer> [I<OPTIONS>] I<domain-id>
-
-Attach to domain's VNC server, forking a vncviewer process.
-
-B<OPTIONS>
-
-=over 4
-
-=item I<--autopass>
-
-Pass VNC password to vncviewer via stdin.
-
-=back
-
-=back
-
-=head1 XEN HOST SUBCOMMANDS
-
-=over 4
-
-=item B<debug-keys> I<keys>
-
-Send debug I<keys> to Xen. It is the same as pressing the Xen
-"conswitch" (Ctrl-A by default) three times and then pressing "keys".
-
-=item B<dmesg> [B<-c>]
-
-Reads the Xen message buffer, similar to dmesg on a Linux system. The
-buffer contains informational, warning, and error messages created
-during Xen's boot process. If you are having problems with Xen, this
-is one of the first places to look as part of problem determination.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-c>, B<--clear>
-
-Clears Xen's message buffer.
-
-=back
-
-=item B<info> [B<-n>, B<--numa>]
-
-Print information about the Xen host in I<name : value> format. When
-reporting a Xen bug, please provide this information as part of the
-bug report. See I<http://wiki.xen.org/xenwiki/ReportingBugs> on how to
-report Xen bugs.
-
-Sample output looks as follows:
-
- host : scarlett
- release : 3.1.0-rc4+
- version : #1001 SMP Wed Oct 19 11:09:54 UTC 2011
- machine : x86_64
- nr_cpus : 4
- nr_nodes : 1
- cores_per_socket : 4
- threads_per_core : 1
- cpu_mhz : 2266
- hw_caps : bfebfbff:28100800:00000000:00003b40:009ce3bd:00000000:00000001:00000000
- virt_caps : hvm hvm_directio
- total_memory : 6141
- free_memory : 4274
- free_cpus : 0
- outstanding_claims : 0
- xen_major : 4
- xen_minor : 2
- xen_extra : -unstable
- xen_caps : xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64
- xen_scheduler : credit
- xen_pagesize : 4096
- platform_params : virt_start=0xffff800000000000
- xen_changeset : Wed Nov 02 17:09:09 2011 +0000 24066:54a5e994a241
- xen_commandline : com1=115200,8n1 guest_loglvl=all dom0_mem=750M console=com1
- cc_compiler : gcc version 4.4.5 (Debian 4.4.5-8)
- cc_compile_by : sstabellini
- cc_compile_domain : uk.xensource.com
- cc_compile_date : Tue Nov 8 12:03:05 UTC 2011
- xend_config_format : 4
-
-
-B<FIELDS>
-
-Not all fields will be explained here, but some of the less obvious
-ones deserve explanation:
-
-=over 4
-
-=item B<hw_caps>
-
-A vector showing what hardware capabilities are supported by your
-processor. This is equivalent to, though more cryptic than, the flags
-field in /proc/cpuinfo on a normal Linux machine: they both derive from
-the feature bits returned by the cpuid command on x86 platforms.
-
-=item B<free_memory>
-
-Available memory (in MB) not allocated to Xen, or any other domains, or
-claimed for domains.
-
-=item B<outstanding_claims>
-
-When a claim call is done (see L<xl.conf>) a reservation for a specific
-amount of pages is set and also a global value is incremented. This
-global value (outstanding_claims) is then reduced as the domain's memory
-is populated and eventually reaches zero. Most of the time the value will
-be zero, but if you are launching multiple guests, and B<claim_mode> is
-enabled, this value can increase/decrease. Note that the value also
-affects the B<free_memory> - as it will reflect the free memory
-in the hypervisor minus the outstanding pages claimed for guests.
-See xl I<info> B<claims> parameter for detailed listing.
-
-=item B<xen_caps>
-
-The Xen version and architecture. Architecture values can be one of:
-x86_32, x86_32p (i.e. PAE enabled), x86_64, ia64.
-
-=item B<xen_changeset>
-
-The Xen mercurial changeset id. Very useful for determining exactly
-what version of code your Xen system was built from.
-
-=back
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-n>, B<--numa>
-
-List host NUMA topology information
-
-=back
-
-=item B<top>
-
-Executes the B<xentop> command, which provides real time monitoring of
-domains. Xentop is a curses interface, and reasonably self
-explanatory.
-
-=item B<uptime>
-
-Prints the current uptime of the domains running.
-
-=item B<claims>
-
-Prints information about outstanding claims by the guests. This provides
-the outstanding claims and currently populated memory count for the guests.
-These values added up reflect the global outstanding claim value, which
-is provided via the I<info> argument, B<outstanding_claims> value.
-The B<Mem> column has the cumulative value of outstanding claims and
-the total amount of memory currently allocated to the guest.
-
-B<EXAMPLE>
-
-An example format for the list is as follows:
-
- Name ID Mem VCPUs State Time(s) Claimed
- Domain-0 0 2047 4 r----- 19.7 0
- OL5 2 2048 1 --p--- 0.0 847
- OL6 3 1024 4 r----- 5.9 0
- Windows_XP 4 2047 1 --p--- 0.0 1989
-
-In which it can be seen that the OL5 guest still has 847MB of claimed
-memory (out of the total 2048MB where 1191MB has been allocated to
-the guest).
-
-=back
-
-=head1 SCHEDULER SUBCOMMANDS
-
-Xen ships with a number of domain schedulers, which can be set at boot
-time with the B<sched=> parameter on the Xen command line. By
-default B<credit> is used for scheduling.
-
-=over 4
-
-=item B<sched-credit> [I<OPTIONS>]
-
-Set or get credit scheduler parameters. The credit scheduler is a
-proportional fair share CPU scheduler built from the ground up to be
-work conserving on SMP hosts.
-
-Each domain (including Domain0) is assigned a weight and a cap.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-d DOMAIN>, B<--domain=DOMAIN>
-
-Specify domain for which scheduler parameters are to be modified or retrieved.
-Mandatory for modifying scheduler parameters.
-
-=item B<-w WEIGHT>, B<--weight=WEIGHT>
-
-A domain with a weight of 512 will get twice as much CPU as a domain
-with a weight of 256 on a contended host. Legal weights range from 1
-to 65535 and the default is 256.
-
-=item B<-c CAP>, B<--cap=CAP>
-
-The cap optionally fixes the maximum amount of CPU a domain will be
-able to consume, even if the host system has idle CPU cycles. The cap
-is expressed in percentage of one physical CPU: 100 is 1 physical CPU,
-50 is half a CPU, 400 is 4 CPUs, etc. The default, 0, means there is
-no upper cap.
-
-NB: Many systems have features that will scale down the computing
-power of a cpu that is not 100% utilized. This can be in the
-operating system, but can also sometimes be below the operating system
-in the BIOS. If you set a cap such that individual cores are running
-at less than 100%, this may have an impact on the performance of your
-workload over and above the impact of the cap. For example, if your
-processor runs at 2GHz, and you cap a vm at 50%, the power management
-system may also reduce the clock speed to 1GHz; the effect will be
-that your VM gets 25% of the available power (50% of 1GHz) rather than
-50% (50% of 2GHz). If you are not getting the performance you expect,
-look at performance and cpufreq options in your operating system and
-your BIOS.
-
-=item B<-p CPUPOOL>, B<--cpupool=CPUPOOL>
-
-Restrict output to domains in the specified cpupool.
-
-=item B<-s>, B<--schedparam>
-
-Specify to list or set pool-wide scheduler parameters.
-
-=item B<-t TSLICE>, B<--tslice_ms=TSLICE>
-
-Timeslice tells the scheduler how long to allow VMs to run before
-pre-empting. The default is 30ms. Valid ranges are 1ms to 1000ms.
-The length of the timeslice (in ms) must be higher than the length of
-the ratelimit (see below).
-
-=item B<-r RLIMIT>, B<--ratelimit_us=RLIMIT>
-
-Ratelimit attempts to limit the number of schedules per second. It
-sets a minimum amount of time (in microseconds) a VM must run before
-we will allow a higher-priority VM to pre-empt it. The default value
-is 1000 microseconds (1ms). Valid range is 100 to 500000 (500ms).
-The ratelimit length must be lower than the timeslice length.
-
-=back
-
-B<COMBINATION>
-
-The following is the effect of combining the above options:
-
-=over 4
-
-=item B<E<lt>nothingE<gt>> : List all domain params and sched params from all pools
-
-=item B<-d [domid]> : List domain params for domain [domid]
-
-=item B<-d [domid] [params]> : Set domain params for domain [domid]
-
-=item B<-p [pool]> : list all domains and sched params for [pool]
-
-=item B<-s> : List sched params for poolid 0
-
-=item B<-s [params]> : Set sched params for poolid 0
-
-=item B<-p [pool] -s> : List sched params for [pool]
-
-=item B<-p [pool] -s [params]> : Set sched params for [pool]
-
-=item B<-p [pool] -d>... : Illegal
-
-=back
-
-=item B<sched-credit2> [I<OPTIONS>]
-
-Set or get credit2 scheduler parameters. The credit2 scheduler is a
-proportional fair share CPU scheduler built from the ground up to be
-work conserving on SMP hosts.
-
-Each domain (including Domain0) is assigned a weight.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-d DOMAIN>, B<--domain=DOMAIN>
-
-Specify domain for which scheduler parameters are to be modified or retrieved.
-Mandatory for modifying scheduler parameters.
-
-=item B<-w WEIGHT>, B<--weight=WEIGHT>
-
-A domain with a weight of 512 will get twice as much CPU as a domain
-with a weight of 256 on a contended host. Legal weights range from 1
-to 65535 and the default is 256.
-
-=item B<-p CPUPOOL>, B<--cpupool=CPUPOOL>
-
-Restrict output to domains in the specified cpupool.
-
-=back
-
-=item B<sched-rtds> [I<OPTIONS>]
-
-Set or get rtds (Real Time Deferrable Server) scheduler parameters.
-This rt scheduler applies Preemptive Global Earliest Deadline First
-real-time scheduling algorithm to schedule VCPUs in the system.
-Each VCPU has a dedicated period and budget.
-VCPUs in the same domain have the same period and budget.
-While scheduled, a VCPU burns its budget.
-A VCPU has its budget replenished at the beginning of each period;
-Unused budget is discarded at the end of each period.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-d DOMAIN>, B<--domain=DOMAIN>
-
-Specify domain for which scheduler parameters are to be modified or retrieved.
-Mandatory for modifying scheduler parameters.
-
-=item B<-v VCPUID/all>, B<--vcpuid=VCPUID/all>
-
-Specify vcpu for which scheduler parameters are to be modified or retrieved.
-
-=item B<-p PERIOD>, B<--period=PERIOD>
-
-Period of time, in microseconds, over which to replenish the budget.
-
-=item B<-b BUDGET>, B<--budget=BUDGET>
-
-Amount of time, in microseconds, that the VCPU will be allowed
-to run every period.
-
-=item B<-c CPUPOOL>, B<--cpupool=CPUPOOL>
-
-Restrict output to domains in the specified cpupool.
-
-=back
-
-B<EXAMPLE>
-
-=over 4
-
-1) Use B<-v all> to see the budget and period of all the VCPUs of
-all the domains:
-
- xl sched-rtds -v all
- Cpupool Pool-0: sched=RTDS
- Name ID VCPU Period Budget
- Domain-0 0 0 10000 4000
- vm1 1 0 300 150
- vm1 1 1 400 200
- vm1 1 2 10000 4000
- vm1 1 3 1000 500
- vm2 2 0 10000 4000
- vm2 2 1 10000 4000
-
-Without any arguments, it will output the default scheduing
-parameters for each domain:
-
- xl sched-rtds
- Cpupool Pool-0: sched=RTDS
- Name ID Period Budget
- Domain-0 0 10000 4000
- vm1 1 10000 4000
- vm2 2 10000 4000
-
-
-2) Use, for instance B<-d vm1, -v all> to see the budget and
-period of all VCPUs of a specific domain (B<vm1>):
-
- xl sched-rtds -d vm1 -v all
- Name ID VCPU Period Budget
- vm1 1 0 300 150
- vm1 1 1 400 200
- vm1 1 2 10000 4000
- vm1 1 3 1000 500
-
-To see the parameters of a subset of the VCPUs of a domain, use:
-
- xl sched-rtds -d vm1 -v 0 -v 3
- Name ID VCPU Period Budget
- vm1 1 0 300 150
- vm1 1 3 1000 500
-
-If no B<-v> is speficified, the default scheduling parameter for the
-domain are shown:
-
- xl sched-rtds -d vm1
- Name ID Period Budget
- vm1 1 10000 4000
-
-
-3) Users can set the budget and period of multiple VCPUs of a
-specific domain with only one command,
-e.g., "xl sched-rtds -d vm1 -v 0 -p 100 -b 50 -v 3 -p 300 -b 150".
-
-To change the parameters of all the VCPUs of a domain, use B<-v all>,
-e.g., "xl sched-rtds -d vm1 -v all -p 500 -b 250".
-
-=back
-
-=back
-
-=head1 CPUPOOLS COMMANDS
-
-Xen can group the physical cpus of a server in cpu-pools. Each physical CPU is
-assigned at most to one cpu-pool. Domains are each restricted to a single
-cpu-pool. Scheduling does not cross cpu-pool boundaries, so each cpu-pool has
-an own scheduler.
-Physical cpus and domains can be moved from one cpu-pool to another only by an
-explicit command.
-Cpu-pools can be specified either by name or by id.
-
-=over 4
-
-=item B<cpupool-create> [I<OPTIONS>] [I<ConfigFile>] [I<Variable=Value> ...]
-
-Create a cpu pool based an config from a I<ConfigFile> or command-line
-parameters. Variable settings from the I<ConfigFile> may be altered
-by specifying new or additional assignments on the command line.
-
-See the L<xlcpupool.cfg(5)> manpage for more information.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-f=FILE>, B<--defconfig=FILE>
-
-Use the given configuration file.
-
-=back
-
-=item B<cpupool-list> [I<-c|--cpus>] [I<cpu-pool>]
-
-List CPU pools on the host.
-If I<-c> is specified, B<xl> prints a list of CPUs used by I<cpu-pool>.
-
-=item B<cpupool-destroy> I<cpu-pool>
-
-Deactivates a cpu pool.
-This is possible only if no domain is active in the cpu-pool.
-
-=item B<cpupool-rename> I<cpu-pool> <newname>
-
-Renames a cpu-pool to I<newname>.
-
-=item B<cpupool-cpu-add> I<cpu-pool> I<cpus|node:nodes>
-
-Adds one or more CPUs or NUMA nodes to I<cpu-pool>. CPUs and NUMA
-nodes can be specified as single CPU/node IDs or as ranges.
-
-For example:
-
- (a) xl cpupool-cpu-add mypool 4
- (b) xl cpupool-cpu-add mypool 1,5,10-16,^13
- (c) xl cpupool-cpu-add mypool node:0,nodes:2-3,^10-12,8
-
-means adding CPU 4 to mypool, in (a); adding CPUs 1,5,10,11,12,14,15
-and 16, in (b); and adding all the CPUs of NUMA nodes 0, 2 and 3,
-plus CPU 8, but keeping out CPUs 10,11,12, in (c).
-
-All the specified CPUs that can be added to the cpupool will be added
-to it. If some CPU can't (e.g., because they're already part of another
-cpupool), an error is reported about each one of them.
-
-=item B<cpupool-cpu-remove> I<cpus|node:nodes>
-
-Removes one or more CPUs or NUMA nodes from I<cpu-pool>. CPUs and NUMA
-nodes can be specified as single CPU/node IDs or as ranges, using the
-exact same syntax as in B<cpupool-cpu-add> above.
-
-=item B<cpupool-migrate> I<domain> I<cpu-pool>
-
-Moves a domain specified by domain-id or domain-name into a cpu-pool.
-Domain-0 can't be moved to another cpu-pool.
-
-=item B<cpupool-numa-split>
-
-Splits up the machine into one cpu-pool per numa node.
-
-=back
-
-=head1 VIRTUAL DEVICE COMMANDS
-
-Most virtual devices can be added and removed while guests are
-running, assuming that the necessary support exists in the guest. The
-effect to the guest OS is much the same as any hotplug event.
-
-=head2 BLOCK DEVICES
-
-=over 4
-
-=item B<block-attach> I<domain-id> I<disc-spec-component(s)> ...
-
-Create a new virtual block device. This will trigger a hotplug event
-for the guest.
-
-Note that only PV block devices are supported by block-attach.
-Requests to attach emulated devices (eg, vdev=hdc) will result in only
-the PV view being available to the guest.
-
-B<OPTIONS>
-
-=over 4
-
-=item I<domain-id>
-
-The domain id of the guest domain that the device will be attached to.
-
-=item I<disc-spec-component>
-
-A disc specification in the same format used for the B<disk> variable in
-the domain config file. See
-L<http://xenbits.xen.org/docs/unstable/misc/xl-disk-configuration.txt>.
-
-=back
-
-=item B<block-detach> I<domain-id> I<devid> [B<--force>]
-
-Detach a domain's virtual block device. I<devid> may be the symbolic
-name or the numeric device id given to the device by domain 0. You
-will need to run B<xl block-list> to determine that number.
-
-Detaching the device requires the cooperation of the domain. If the
-domain fails to release the device (perhaps because the domain is hung
-or is still using the device), the detach will fail. The B<--force>
-parameter will forcefully detach the device, but may cause IO errors
-in the domain.
-
-=item B<block-list> I<domain-id>
-
-List virtual block devices for a domain.
-
-=item B<cd-insert> I<domain-id> I<VirtualDevice> I<target>
-
-Insert a cdrom into a guest domain's existing virtial cd drive. The
-virtual drive must already exist but can be current empty.
-
-Only works with HVM domains.
-
-B<OPTIONS>
-
-=over 4
-
-=item I<VirtualDevice>
-
-How the device should be presented to the guest domain; for example "hdc".
-
-=item I<target>
-
-the target path in the backend domain (usually domain 0) to be
-exported; Can be a block device or a file etc. See B<target> in
-F<docs/misc/xl-disk-configuration.txt>.
-
-=back
-
-=item B<cd-eject> I<domain-id> I<VirtualDevice>
-
-Eject a cdrom from a guest's virtual cd drive. Only works with HVM domains.
-
-B<OPTIONS>
-
-=over 4
-
-=item I<VirtualDevice>
-
-How the device should be presented to the guest domain; for example "hdc".
-
-=back
-
-=back
-
-=head2 NETWORK DEVICES
-
-=over 4
-
-=item B<network-attach> I<domain-id> I<network-device>
-
-Creates a new network device in the domain specified by I<domain-id>.
-I<network-device> describes the device to attach, using the same format as the
-B<vif> string in the domain config file. See L<xl.cfg> and
-L<http://xenbits.xen.org/docs/unstable/misc/xl-network-configuration.html>
-for more informations.
-
-Note that only attaching PV network interface is supported.
-
-=item B<network-detach> I<domain-id> I<devid|mac>
-
-Removes the network device from the domain specified by I<domain-id>.
-I<devid> is the virtual interface device number within the domain
-(i.e. the 3 in vif22.3). Alternatively the I<mac> address can be used to
-select the virtual interface to detach.
-
-=item B<network-list> I<domain-id>
-
-List virtual network interfaces for a domain.
-
-=back
-
-=head2 CHANNEL DEVICES
-
-=over 4
-
-=item B<channel-list> I<domain-id>
-
-List virtual channel interfaces for a domain.
-
-=back
-
-=head2 VTPM DEVICES
-
-=over 4
-
-=item B<vtpm-attach> I<domain-id> I<vtpm-device>
-
-Creates a new vtpm device in the domain specified by I<domain-id>.
-I<vtpm-device> describes the device to attach, using the same format as the
-B<vtpm> string in the domain config file. See L<xl.cfg> for
-more information.
-
-=item B<vtpm-detach> I<domain-id> I<devid|uuid>
-
-Removes the vtpm device from the domain specified by I<domain-id>.
-I<devid> is the numeric device id given to the virtual trusted
-platform module device. You will need to run B<xl vtpm-list> to determine that number.
-Alternatively the I<uuid> of the vtpm can be used to
-select the virtual device to detach.
-
-=item B<vtpm-list> I<domain-id>
-
-List virtual trusted platform modules for a domain.
-
-=back
-
-=head1 PCI PASS-THROUGH
-
-=over 4
-
-=item B<pci-assignable-list>
-
-List all the assignable PCI devices.
-These are devices in the system which are configured to be
-available for passthrough and are bound to a suitable PCI
-backend driver in domain 0 rather than a real driver.
-
-=item B<pci-assignable-add> I<BDF>
-
-Make the device at PCI Bus/Device/Function BDF assignable to guests.
-This will bind the device to the pciback driver. If it is already
-bound to a driver, it will first be unbound, and the original driver
-stored so that it can be re-bound to the same driver later if desired.
-If the device is already bound, it will return success.
-
-CAUTION: This will make the device unusable by Domain 0 until it is
-returned with pci-assignable-remove. Care should therefore be taken
-not to do this on a device critical to domain 0's operation, such as
-storage controllers, network interfaces, or GPUs that are currently
-being used.
-
-=item B<pci-assignable-remove> [I<-r>] I<BDF>
-
-Make the device at PCI Bus/Device/Function BDF assignable to guests. This
-will at least unbind the device from pciback. If the -r option is specified,
-it will also attempt to re-bind the device to its original driver, making it
-usable by Domain 0 again. If the device is not bound to pciback, it will
-return success.
-
-=item B<pci-attach> I<domain-id> I<BDF>
-
-Hot-plug a new pass-through pci device to the specified domain.
-B<BDF> is the PCI Bus/Device/Function of the physical device to pass-through.
-
-=item B<pci-detach> [I<-f>] I<domain-id> I<BDF>
-
-Hot-unplug a previously assigned pci device from a domain. B<BDF> is the PCI
-Bus/Device/Function of the physical device to be removed from the guest domain.
-
-If B<-f> is specified, B<xl> is going to forcefully remove the device even
-without guest's collaboration.
-
-=item B<pci-list> I<domain-id>
-
-List pass-through pci devices for a domain.
-
-=back
-
-=head1 USB PASS-THROUGH
-
-=over 4
-
-=item B<usbctrl-attach> I<domain-id> I<usbctrl-device>
-
-Create a new USB controller in the domain specified by I<domain-id>,
-I<usbctrl-device> describes the device to attach, using form
-C<KEY=VALUE KEY=VALUE ...> where B<KEY=VALUE> has the same
-meaning as the B<usbctrl> description in the domain config file.
-See L<xl.cfg> for more information.
-
-=item B<usbctrl-detach> I<domain-id> I<devid>
-
-Destroy a USB controller from the specified domain.
-B<devid> is devid of the USB controller.
-
-=item B<usbdev-attach> I<domain-id> I<usbdev-device>
-
-Hot-plug a new pass-through USB device to the domain specified by
-I<domain-id>, I<usbdev-device> describes the device to attach, using
-form C<KEY=VALUE KEY=VALUE ...> where B<KEY=VALUE> has the same
-meaning as the B<usbdev> description in the domain config file.
-See L<xl.cfg> for more information.
-
-=item B<usbdev-detach> I<domain-id> I<controller=devid> I<port=number>
-
-Hot-unplug a previously assigned USB device from a domain.
-B<controller=devid> and B<port=number> is USB controller:port in guest
-where the USB device is attached to.
-
-=item B<usb-list> I<domain-id>
-
-List pass-through usb devices for a domain.
-
-=back
-
-=head1 TMEM
-
-=over 4
-
-=item B<tmem-list> I[<-l>] I<domain-id>
-
-List tmem pools. If I<-l> is specified, also list tmem stats.
-
-=item B<tmem-freeze> I<domain-id>
-
-Freeze tmem pools.
-
-=item B<tmem-thaw> I<domain-id>
-
-Thaw tmem pools.
-
-=item B<tmem-set> I<domain-id> [I<OPTIONS>]
-
-Change tmem settings.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-w> I<WEIGHT>
-
-Weight (int)
-
-=item B<-c> I<CAP>
-
-Cap (int)
-
-=item B<-p> I<COMPRESS>
-
-Compress (int)
-
-=back
-
-=item B<tmem-shared-auth> I<domain-id> [I<OPTIONS>]
-
-De/authenticate shared tmem pool.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-u> I<UUID>
-
-Specify uuid (abcdef01-2345-6789-1234-567890abcdef)
-
-=item B<-a> I<AUTH>
-
-0=auth,1=deauth
-
-=back
-
-=item B<tmem-freeable>
-
-Get information about how much freeable memory (MB) is in-use by tmem.
-
-=back
-
-=head1 FLASK
-
-B<FLASK> is a security framework that defines a mandatory access control policy
-providing fine-grained controls over Xen domains, allowing the policy writer
-to define what interactions between domains, devices, and the hypervisor are
-permitted. Some example of what you can do using XSM/FLASK:
- - Prevent two domains from communicating via event channels or grants
- - Control which domains can use device passthrough (and which devices)
- - Restrict or audit operations performed by privileged domains
- - Prevent a privileged domain from arbitrarily mapping pages from other
- domains.
-
-You can find more details on how to use FLASK and an example security
-policy here: L<http://xenbits.xen.org/docs/unstable/misc/xsm-flask.txt>
-
-=over 4
-
-=item B<getenforce>
-
-Determine if the FLASK security module is loaded and enforcing its policy.
-
-=item B<setenforce> I<1|0|Enforcing|Permissive>
-
-Enable or disable enforcing of the FLASK access controls. The default is
-permissive, but this can be changed to enforcing by specifying "flask=enforcing"
-or "flask=late" on the hypervisor's command line.
-
-=item B<loadpolicy> I<policy-file>
-
-Load FLASK policy from the given policy file. The initial policy is provided to
-the hypervisor as a multiboot module; this command allows runtime updates to the
-policy. Loading new security policy will reset runtime changes to device labels.
-
-=back
-
-=head1 PLATFORM SHARED RESOURCE MONITORING/CONTROL
-
-Intel Haswell and later server platforms offer shared resource monitoring
-and control technologies. The availability of these technologies and the
-hardware capabilities can be shown with B<psr-hwinfo>.
-
-See L<http://xenbits.xen.org/docs/unstable/misc/xl-psr.html> for more
-information.
-
-=over 4
-
-=item B<psr-hwinfo> [I<OPTIONS>]
-
-Show Platform Shared Resource (PSR) hardware information.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-m>, B<--cmt>
-
-Show Cache Monitoring Technology (CMT) hardware information.
-
-=item B<-a>, B<--cat>
-
-Show Cache Allocation Technology (CAT) hardware information.
-
-=back
-
-=back
-
-=head2 CACHE MONITORING TECHNOLOGY
-
-Intel Haswell and later server platforms offer monitoring capability in each
-logical processor to measure specific platform shared resource metric, for
-example, L3 cache occupancy. In the Xen implementation, the monitoring
-granularity is domain level. To monitor a specific domain, just attach the
-domain id with the monitoring service. When the domain doesn't need to be
-monitored any more, detach the domain id from the monitoring service.
-
-Intel Broadwell and later server platforms also offer total/local memory
-bandwidth monitoring. Xen supports per-domain monitoring for these two
-additional monitoring types. Both memory bandwidth monitoring and L3 cache
-occupancy monitoring share the same set of underlying monitoring service. Once
-a domain is attached to the monitoring service, monitoring data can be shown
-for any of these monitoring types.
-
-=over 4
-
-=item B<psr-cmt-attach> [I<domain-id>]
-
-attach: Attach the platform shared resource monitoring service to a domain.
-
-=item B<psr-cmt-detach> [I<domain-id>]
-
-detach: Detach the platform shared resource monitoring service from a domain.
-
-=item B<psr-cmt-show> [I<psr-monitor-type>] [I<domain-id>]
-
-Show monitoring data for a certain domain or all domains. Current supported
-monitor types are:
- - "cache-occupancy": showing the L3 cache occupancy(KB).
- - "total-mem-bandwidth": showing the total memory bandwidth(KB/s).
- - "local-mem-bandwidth": showing the local memory bandwidth(KB/s).
-
-=back
-
-=head2 CACHE ALLOCATION TECHNOLOGY
-
-Intel Broadwell and later server platforms offer capabilities to configure and
-make use of the Cache Allocation Technology (CAT) mechanisms, which enable more
-cache resources (i.e. L3 cache) to be made available for high priority
-applications. In the Xen implementation, CAT is used to control cache allocation
-on VM basis. To enforce cache on a specific domain, just set capacity bitmasks
-(CBM) for the domain.
-
-Intel Broadwell and later server platforms also offer Code/Data Prioritization
-(CDP) for cache allocations, which support specifying code or data cache for
-applications. CDP is used on a per VM basis in the Xen implementation. To
-specify code or data CBM for the domain, CDP feature must be enabled and CBM
-type options need to be specified when setting CBM, and the type options (code
-and data) are mutually exclusive.
-
-=over 4
-
-=item B<psr-cat-cbm-set> [I<OPTIONS>] I<domain-id> I<cbm>
-
-Set cache capacity bitmasks(CBM) for a domain. For how to specify I<cbm>
-please refer to L<http://xenbits.xen.org/docs/unstable/misc/xl-psr.html>.
-
-B<OPTIONS>
-
-=over 4
-
-=item B<-s SOCKET>, B<--socket=SOCKET>
-
-Specify the socket to process, otherwise all sockets are processed.
-
-=item B<-c>, B<--code>
-
-Set code CBM when CDP is enabled.
-
-=item B<-d>, B<--data>
-
-Set data CBM when CDP is enabled.
-
-=back
-
-=item B<psr-cat-show> [I<domain-id>]
-
-Show CAT settings for a certain domain or all domains.
-
-=back
-
-=head1 IGNORED FOR COMPATIBILITY WITH XM
-
-xl is mostly command-line compatible with the old xm utility used with
-the old Python xend. For compatibility, the following options are
-ignored:
-
-=over 4
-
-=item B<xl migrate --live>
-
-=back
-
-=head1 TO BE DOCUMENTED
-
-We need better documentation for:
-
-=over 4
-
-=item B<tmem>
-
-Transcendent Memory.
-
-=back
-
-=head1 SEE ALSO
-
-The following man pages:
-
-L<xl.cfg>(5), L<xlcpupool.cfg>(5), B<xentop>(1)
-
-And the following documents on the xen.org website:
-
-L<http://xenbits.xen.org/docs/unstable/misc/xl-network-configuration.html>
-L<http://xenbits.xen.org/docs/unstable/misc/xl-disk-configuration.txt>
-L<http://xenbits.xen.org/docs/unstable/misc/xsm-flask.txt>
-L<http://xenbits.xen.org/docs/unstable/misc/xl-psr.html>
-
-For systems that don't automatically bring CPU online:
-
-L<http://wiki.xen.org/wiki/Paravirt_Linux_CPU_Hotplug>
-
-=head1 BUGS
-
-Send bugs to xen-devel@lists.xen.org, see
-http://wiki.xen.org/xenwiki/ReportingBugs on how to send bug reports.
--- /dev/null
+=head1 NAME
+
+XL - Xen management tool, based on LibXenlight
+
+=head1 SYNOPSIS
+
+B<xl> I<subcommand> [I<args>]
+
+=head1 DESCRIPTION
+
+The B<xl> program is the new tool for managing Xen guest
+domains. The program can be used to create, pause, and shutdown
+domains. It can also be used to list current domains, enable or pin
+VCPUs, and attach or detach virtual block devices.
+
+The basic structure of every B<xl> command is almost always:
+
+=over 2
+
+B<xl> I<subcommand> [I<OPTIONS>] I<domain-id>
+
+=back
+
+Where I<subcommand> is one of the subcommands listed below, I<domain-id>
+is the numeric domain id, or the domain name (which will be internally
+translated to domain id), and I<OPTIONS> are subcommand specific
+options. There are a few exceptions to this rule in the cases where
+the subcommand in question acts on all domains, the entire machine,
+or directly on the Xen hypervisor. Those exceptions will be clear for
+each of those subcommands.
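+
+For example, a (hypothetical) guest named B<myguest> with domain id 3
+could be addressed either way:
+
+ xl shutdown myguest     # by name
+ xl shutdown 3           # by numeric domain id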
+
+=head1 NOTES
+
+=over 4
+
+=item start the script B</etc/init.d/xencommons> at boot time
+
+Most B<xl> operations rely upon B<xenstored> and B<xenconsoled>: make
+sure you start the script B</etc/init.d/xencommons> at boot time to
+initialize all the daemons needed by B<xl>.
+
+=item set up a B<xenbr0> bridge in dom0
+
+In the most common network configuration, you need to set up a bridge in dom0
+named B<xenbr0> in order to have a working network in the guest domains.
+Please refer to the documentation of your Linux distribution to know how to
+set up the bridge.
+
+=item B<autoballoon>
+
+If you specify the amount of memory dom0 has, passing B<dom0_mem> to
+Xen, it is highly recommended to disable B<autoballoon>. Edit
+B</etc/xen/xl.conf> and set it to 0.
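+
+A minimal sketch (the B<dom0_mem> value is purely illustrative):
+
+ # Xen command line (e.g. in the bootloader entry):
+ #   dom0_mem=2048M
+ # Then, in /etc/xen/xl.conf:
+ autoballoon=0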
+
+=item run xl as B<root>
+
+Most B<xl> commands require root privileges to run due to the
+communications channels used to talk to the hypervisor. Running as
+non-root will return an error.
+
+=back
+
+=head1 GLOBAL OPTIONS
+
+Some global options are always available:
+
+=over 4
+
+=item B<-v>
+
+Verbose.
+
+=item B<-N>
+
+Dry run: do not actually execute the command.
+
+=item B<-f>
+
+Force execution: xl will refuse to run some commands if it detects that xend is
+also running. This option forces the execution of those commands, even
+though doing so is unsafe.
+
+=item B<-t>
+
+Always use carriage-return-based overwriting for printing progress
+messages without scrolling the screen. Without -t, this is done only
+if stderr is a tty.
+
+=back
+
+=head1 DOMAIN SUBCOMMANDS
+
+The following subcommands manipulate domains directly. As stated
+previously, most commands take I<domain-id> as the first parameter.
+
+=over 4
+
+=item B<button-press> I<domain-id> I<button>
+
+I<This command is deprecated. Please use C<xl trigger> in preference>
+
+Indicate an ACPI button press to the domain. I<button> may be 'power' or
+'sleep'. This command is only available for HVM domains.
+
+=item B<create> [I<configfile>] [I<OPTIONS>]
+
+The create subcommand takes a config file as first argument: see
+L<xl.cfg> for full details of that file format and possible options.
+If I<configfile> is missing B<XL> creates the domain starting from the
+default value for every option.
+
+I<configfile> has to be an absolute path to a file.
+
+Create will return B<as soon> as the domain is started. This B<does
+not> mean the guest OS in the domain has actually booted, or is
+available for input.
+
+If the I<-F> option is specified, create will start the domain and not
+return until its death.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-q>, B<--quiet>
+
+No console output.
+
+=item B<-f=FILE>, B<--defconfig=FILE>
+
+Use the given configuration file.
+
+=item B<-p>
+
+Leave the domain paused after it is created.
+
+=item B<-F>
+
+Run in foreground until death of the domain.
+
+=item B<-V>, B<--vncviewer>
+
+Attach to domain's VNC server, forking a vncviewer process.
+
+=item B<-A>, B<--vncviewer-autopass>
+
+Pass VNC password to vncviewer via stdin.
+
+=item B<-c>
+
+Attach console to the domain as soon as it has started. This is
+useful for diagnosing issues with crashing domains, and is a general
+convenience since you often want to watch the domain boot.
+
+=item B<key=value>
+
+It is possible to pass I<key=value> pairs on the command line to provide
+options as if they were written in the configuration file; these override
+whatever is in the I<configfile>.
+
+NB: Many config options require characters such as quotes or brackets
+which are interpreted by the shell (and often discarded) before being
+passed to xl, resulting in xl being unable to parse the value
+correctly. A simple work-around is to put all extra options within a
+single set of quotes, separated by semicolons. (See below for an example.)
+
+=back
+
+B<EXAMPLES>
+
+=over 4
+
+=item I<with config file>
+
+ xl create DebianLenny
+
+This creates a domain with the file /etc/xen/DebianLenny, and returns as
+soon as it is run.
+
+=item I<with extra parameters>
+
+ xl create hvm.cfg 'cpus="0-3"; pci=["01:05.1","01:05.2"]'
+
+This creates a domain with the file hvm.cfg, but additionally pins it to
+cpus 0-3, and passes through two PCI devices.
+
+=back
+
+=item B<config-update> B<domid> [I<configfile>] [I<OPTIONS>]
+
+Update the saved configuration for a running domain. This has no
+immediate effect but will be applied when the guest is next
+restarted. This command is useful to ensure that runtime modifications
+made to the guest will be preserved when the guest is restarted.
+
+Since Xen 4.5 xl has improved capabilities to handle dynamic domain
+configuration changes and will preserve any changes made at runtime
+when necessary. Therefore it should not normally be necessary to use
+this command any more.
+
+I<configfile> has to be an absolute path to a file.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-f=FILE>, B<--defconfig=FILE>
+
+Use the given configuration file.
+
+=item B<key=value>
+
+It is possible to pass I<key=value> pairs on the command line to
+provide options as if they were written in the configuration file;
+these override whatever is in the I<configfile>. Please see the note
+under I<create> on handling special characters when passing
+I<key=value> pairs on the command line.
+
+=back
+
+=item B<console> [I<OPTIONS>] I<domain-id>
+
+Attach to domain I<domain-id>'s console. If you've set up your domains to
+have a traditional login console this will look much like a normal
+text login screen.
+
+Use the key combination Ctrl+] to detach the domain console.
+
+B<OPTIONS>
+
+=over 4
+
+=item I<-t [pv|serial]>
+
+Connect to a PV console or connect to an emulated serial console.
+PV consoles are the only consoles available for PV domains while HVM
+domains can have both. If this option is not specified it defaults to
+emulated serial for HVM guests and PV console for PV guests.
+
+=item I<-n NUM>
+
+Connect to console number I<NUM>. Console numbers start from 0.
+
+=back
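+
+For example, to attach to the emulated serial console of a
+(hypothetical) HVM guest named B<myguest>:
+
+ xl console -t serial myguest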
+
+=item B<destroy> [I<OPTIONS>] I<domain-id>
+
+Immediately terminate the domain I<domain-id>. This doesn't give the
+domain OS any chance to react, and is the equivalent of ripping the
+power cord out on a physical machine. In most cases you will want to
+use the B<shutdown> command instead.
+
+B<OPTIONS>
+
+=over 4
+
+=item I<-f>
+
+Allow domain 0 to be destroyed. Because a domain cannot destroy itself, this is
+only possible when using a disaggregated toolstack, and is most useful when
+using a hardware domain separated from domain 0.
+
+=back
+
+=item B<domid> I<domain-name>
+
+Converts a domain name to a domain id.
+
+=item B<domname> I<domain-id>
+
+Converts a domain id to a domain name.
+
+=item B<rename> I<domain-id> I<new-name>
+
+Change the domain name of I<domain-id> to I<new-name>.
+
+=item B<dump-core> I<domain-id> [I<filename>]
+
+Dumps the virtual machine's memory for the specified domain to the
+I<filename> specified, without pausing the domain. The dump file will
+be written to a distribution-specific directory for dump files, such
+as @XEN_DUMP_DIR@/dump.
+
+=item B<help> [I<--long>]
+
+Displays the short help message (i.e. common commands).
+
+The I<--long> option prints out the complete set of B<xl> subcommands,
+grouped by function.
+
+=item B<list> [I<OPTIONS>] [I<domain-id> ...]
+
+Prints information about one or more domains. If no domains are
+specified it prints out information about all domains.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-l>, B<--long>
+
+The output for B<xl list> is not the table view shown below, but
+instead presents the data as a JSON data structure.
+
+=item B<-Z>, B<--context>
+
+Also prints the security labels.
+
+=item B<-v>, B<--verbose>
+
+Also prints the domain UUIDs, the shutdown reason and security labels.
+
+=item B<-c>, B<--cpupool>
+
+Also prints the cpupool the domain belongs to.
+
+=item B<-n>, B<--numa>
+
+Also prints the domain NUMA node affinity.
+
+=back
+
+B<EXAMPLE>
+
+An example format for the list is as follows:
+
+ Name ID Mem VCPUs State Time(s)
+ Domain-0 0 750 4 r----- 11794.3
+ win 1 1019 1 r----- 0.3
+ linux 2 2048 2 r----- 5624.2
+
+Name is the name of the domain. ID is the numeric domain id. Mem is the
+desired amount of memory to allocate to the domain (although it may
+not be the currently allocated amount). VCPUs is the number of
+virtual CPUs allocated to the domain. State is the run state (see
+below). Time is the total run time of the domain as accounted for by
+Xen.
+
+B<STATES>
+
+The State field lists 6 states for a Xen domain, and which ones the
+current domain is in.
+
+=over 4
+
+=item B<r - running>
+
+The domain is currently running on a CPU.
+
+=item B<b - blocked>
+
+The domain is blocked, and not running or runnable. This can be caused
+because the domain is waiting on IO (a traditional wait state) or has
+gone to sleep because there was nothing else for it to do.
+
+=item B<p - paused>
+
+The domain has been paused, usually occurring through the administrator
+running B<xl pause>. When in a paused state the domain will still
+consume allocated resources like memory, but will not be eligible for
+scheduling by the Xen hypervisor.
+
+=item B<s - shutdown>
+
+The guest OS has shut down (SCHEDOP_shutdown has been called) but the
+domain is not dying yet.
+
+=item B<c - crashed>
+
+The domain has crashed, which is always a violent ending. Usually
+this state can only occur if the domain has been configured not to
+restart on crash. See L<xl.cfg(5)> for more info.
+
+=item B<d - dying>
+
+The domain is in the process of dying, but hasn't completely shut down
+or crashed.
+
+=back
+
+B<NOTES>
+
+=over 4
+
+The Time column is deceptive. Virtual IO (network and block devices)
+used by domains requires coordination by Domain0, which means that
+Domain0 is actually charged for much of the time that a DomainU is
+doing IO. Use of this time value to determine relative utilizations
+by domains is thus very suspect, as a high IO workload may show as
+less utilized than a high CPU workload. Consider yourself warned.
+
+=back
+
+=item B<mem-max> I<domain-id> I<mem>
+
+Specify the maximum amount of memory the domain is able to use, appending 't'
+for terabytes, 'g' for gigabytes, 'm' for megabytes, 'k' for kilobytes and 'b'
+for bytes.
+
+The mem-max value may not correspond to the actual memory used in the
+domain, as it may balloon down its memory to give more back to the OS.
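+
+For example, to allow a (hypothetical) guest to use up to 4 gigabytes:
+
+ xl mem-max myguest 4g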
+
+=item B<mem-set> I<domain-id> I<mem>
+
+Set the domain's used memory using the balloon driver; append 't' for
+terabytes, 'g' for gigabytes, 'm' for megabytes, 'k' for kilobytes and 'b' for
+bytes.
+
+Because this operation requires cooperation from the domain operating
+system, there is no guarantee that it will succeed. This command will
+definitely not work unless the domain has the required paravirt
+driver.
+
+B<Warning:> There is no good way to know in advance how small of a
+mem-set will make a domain unstable and cause it to crash. Be very
+careful when using this command on running domains.
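+
+For example, ballooning a (hypothetical) guest down to 1 gigabyte,
+subject to the caveats above:
+
+ xl mem-set myguest 1g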
+
+=item B<migrate> [I<OPTIONS>] I<domain-id> I<host>
+
+Migrate a domain to another host machine. By default B<xl> relies on ssh as a
+transport mechanism between the two hosts.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-s> I<sshcommand>
+
+Use <sshcommand> instead of ssh. The string will be passed to sh. If empty,
+run <host> instead of ssh <host> xl migrate-receive [-d -e].
+
+=item B<-e>
+
+On the new host, do not wait in the background (on <host>) for the death of the
+domain. See the corresponding option of the I<create> subcommand.
+
+=item B<-C> I<config>
+
+Send <config> instead of config file from creation.
+
+=item B<--debug>
+
+Print a huge (!) amount of debug output during the migration process.
+
+=back
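+
+For example, migrating a (hypothetical) guest to a destination host
+that is reachable over ssh:
+
+ xl migrate myguest dst.example.com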
+
+=item B<remus> [I<OPTIONS>] I<domain-id> I<host>
+
+Enable Remus HA or COLO HA for domain. By default B<xl> relies on ssh as a
+transport mechanism between the two hosts.
+
+B<NOTES>
+
+=over 4
+
+Remus support in xl is still in an experimental (proof-of-concept) phase.
+Disk replication support is limited to DRBD disks.
+
+COLO support in xl is still in an experimental (proof-of-concept)
+phase. All options are subject to change in the future.
+
+=back
+
+COLO disk configuration looks like:
+
+ disk = ['...,colo,colo-host=xxx,colo-port=xxx,colo-export=xxx,active-disk=xxx,hidden-disk=xxx...']
+
+The supported options are:
+
+=over 4
+
+=item B<colo-host> : The secondary host's IP address.
+
+=item B<colo-port> : The secondary host's port. An nbd server runs on the
+secondary host, listening on this port.
+
+=item B<colo-export> : The nbd server's disk export name on the secondary
+host.
+
+=item B<active-disk> : The secondary guest's writes are buffered in this
+disk, which is used by the secondary.
+
+=item B<hidden-disk> : The primary's modified contents are buffered in this
+disk, which is used by the secondary.
+
+=back
+
+COLO network configuration looks like:
+
+ vif = [ '...,forwarddev=xxx,...']
+
+The supported options are:
+
+=over 4
+
+=item B<forwarddev> : The forwarding devices for the primary and secondary;
+they are directly connected.
+
+=back
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-i> I<MS>
+
+Checkpoint domain memory every MS milliseconds (default 200ms).
+
+=item B<-u>
+
+Disable memory checkpoint compression.
+
+=item B<-s> I<sshcommand>
+
+Use <sshcommand> instead of ssh. The string will be passed to sh.
+If empty, run <host> instead of ssh <host> xl migrate-receive -r [-e].
+
+=item B<-e>
+
+On the new host, do not wait in the background (on <host>) for the death
+of the domain. See the corresponding option of the I<create> subcommand.
+
+=item B<-N> I<netbufscript>
+
+Use <netbufscript> to set up network buffering instead of the
+default script (/etc/xen/scripts/remus-netbuf-setup).
+
+=item B<-F>
+
+Run Remus in unsafe mode. Use this option with caution as failover may
+not work as intended.
+
+=item B<-b>
+
+Replicate memory checkpoints to /dev/null (blackhole).
+Generally useful for debugging. Requires enabling unsafe mode.
+
+=item B<-n>
+
+Disable network output buffering. Requires enabling unsafe mode.
+
+=item B<-d>
+
+Disable disk replication. Requires enabling unsafe mode.
+
+=item B<-c>
+
+Enable COLO HA. This conflicts with B<-i> and B<-b>, and memory
+checkpoint compression must be disabled.
+
+=back
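+
+For example, checkpointing a (hypothetical) guest to a backup host
+every 100 milliseconds:
+
+ xl remus -i 100 myguest backup.example.com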
+
+=item B<pause> I<domain-id>
+
+Pause a domain. When in a paused state the domain will still consume
+allocated resources such as memory, but will not be eligible for
+scheduling by the Xen hypervisor.
+
+=item B<reboot> [I<OPTIONS>] I<domain-id>
+
+Reboot a domain. This acts just as if the domain had the B<reboot>
+command run from the console. The command returns as soon as it has
+executed the reboot action, which may be significantly before the
+domain actually reboots.
+
+For HVM domains this requires PV drivers to be installed in your guest
+OS. If PV drivers are not present but you have configured the guest OS
+to behave appropriately you may be able to use the I<-F> option to
+trigger a reset button press.
+
+The behavior of what happens to a domain when it reboots is set by the
+B<on_reboot> parameter of the domain configuration file when the
+domain was created.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-F>
+
+If the guest does not support PV reboot control then fall back to
+sending an ACPI power event (equivalent to the I<reset> option to
+I<trigger>).
+
+You should ensure that the guest is configured to behave as expected
+in response to this event.
+
+=back
+
+=item B<restore> [I<OPTIONS>] [I<ConfigFile>] I<CheckpointFile>
+
+Build a domain from an B<xl save> state file. See B<save> for more info.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-p>
+
+Do not unpause domain after restoring it.
+
+=item B<-e>
+
+Do not wait in the background for the death of the domain on the new host.
+See the corresponding option of the I<create> subcommand.
+
+=item B<-d>
+
+Enable debug messages.
+
+=item B<-V>, B<--vncviewer>
+
+Attach to domain's VNC server, forking a vncviewer process.
+
+=item B<-A>, B<--vncviewer-autopass>
+
+Pass VNC password to vncviewer via stdin.
+
+=back
+
+=item B<save> [I<OPTIONS>] I<domain-id> I<CheckpointFile> [I<ConfigFile>]
+
+Saves a running domain to a state file so that it can be restored
+later. Once saved, the domain will no longer be running on the
+system, unless the -c or -p options are used.
+B<xl restore> restores from this checkpoint file.
+Passing a config file argument allows the user to manually select the VM config
+file used to create the domain.
+
+=over 4
+
+=item B<-c>
+
+Leave domain running after creating the snapshot.
+
+=item B<-p>
+
+Leave domain paused after creating the snapshot.
+
+=back
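+
+For example, saving a (hypothetical) guest to a checkpoint file (the
+path is illustrative) while leaving it running, and restoring from it
+later:
+
+ xl save -c myguest /var/lib/xen/save/myguest.chk
+ xl restore /var/lib/xen/save/myguest.chk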
+
+=item B<sharing> [I<domain-id>]
+
+List count of shared pages.
+
+B<OPTIONS>
+
+=over 4
+
+=item I<domain_id>
+
+List specifically for that domain. Otherwise, list for all domains.
+
+=back
+
+=item B<shutdown> [I<OPTIONS>] I<-a|domain-id>
+
+Gracefully shuts down a domain. This coordinates with the domain OS
+to perform graceful shutdown, so there is no guarantee that it will
+succeed, and may take a variable length of time depending on what
+services must be shut down in the domain.
+
+For HVM domains this requires PV drivers to be installed in your guest
+OS. If PV drivers are not present but you have configured the guest OS
+to behave appropriately you may be able to use the I<-F> option to
+trigger a power button press.
+
+The command returns immediately after signalling the domain unless the
+B<-w> flag is used.
+
+The behavior of what happens to a domain when it shuts down is set by the
+B<on_shutdown> parameter of the domain configuration file when the
+domain was created.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-a>, B<--all>
+
+Shutdown all guest domains. Often used when doing a complete shutdown
+of a Xen system.
+
+=item B<-w>, B<--wait>
+
+Wait for the domain to complete shutdown before returning.
+
+=item B<-F>
+
+If the guest does not support PV shutdown control then fall back to
+sending an ACPI power event (equivalent to the I<power> option to
+I<trigger>).
+
+You should ensure that the guest is configured to behave as expected
+in response to this event.
+
+=back
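+
+For example, shutting down a (hypothetical) guest and waiting for the
+shutdown to complete, or shutting down all guests at once:
+
+ xl shutdown -w myguest
+ xl shutdown -a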
+
+=item B<sysrq> I<domain-id> I<letter>
+
+Send a <Magic System Request> to the domain; each type of request is
+represented by a different letter.
+It can be used to send SysRq requests to Linux guests; see sysrq.txt in
+your Linux Kernel sources for more information.
+It requires PV drivers to be installed in your guest OS.
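+
+For example, sending the 's' (emergency sync) SysRq to a
+(hypothetical) Linux guest:
+
+ xl sysrq myguest s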
+
+=item B<trigger> I<domain-id> I<nmi|reset|init|power|sleep|s3resume> [I<VCPU>]
+
+Send a trigger to a domain, where the trigger can be: nmi, reset, init, power
+or sleep. Optionally a specific vcpu number can be passed as an argument.
+This command is only available for HVM domains.
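+
+For example, injecting an NMI into vcpu 0 of a (hypothetical) HVM
+guest:
+
+ xl trigger myguest nmi 0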
+
+=item B<unpause> I<domain-id>
+
+Moves a domain out of the paused state. This will allow a previously
+paused domain to now be eligible for scheduling by the Xen hypervisor.
+
+=item B<vcpu-set> I<domain-id> I<vcpu-count>
+
+Enables the I<vcpu-count> virtual CPUs for the domain in question.
+Like mem-set, this command can only allocate up to the maximum virtual
+CPU count configured at boot for the domain.
+
+If the I<vcpu-count> is smaller than the current number of active
+VCPUs, the highest-numbered VCPUs will be hotplug removed. This may be
+important for pinning purposes.
+
+Attempting to set the VCPUs to a number larger than the initially
+configured VCPU count is an error. Trying to set VCPUs to < 1 will be
+quietly ignored.
+
+Some guests may need to actually bring the newly added CPU online
+after B<vcpu-set>; see the B<SEE ALSO> section for information.
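+
+For example, bringing a (hypothetical) guest down to 2 online VCPUs:
+
+ xl vcpu-set myguest 2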
+
+=item B<vcpu-list> [I<domain-id>]
+
+Lists VCPU information for a specific domain. If no domain is
+specified, VCPU information for all domains will be provided.
+
+=item B<vcpu-pin> [I<-f|--force>] I<domain-id> I<vcpu> I<cpus hard> I<cpus soft>
+
+Set hard and soft affinity for a I<vcpu> of I<domain-id>. Normally VCPUs
+can float between available CPUs whenever Xen deems a different run state
+is appropriate.
+
+Hard affinity can be used to restrict this, by ensuring certain VCPUs
+can only run on certain physical CPUs. Soft affinity specifies a I<preferred>
+set of CPUs. Soft affinity needs special support in the scheduler, which is
+only provided in credit1.
+
+The keyword B<all> can be used to apply the hard and soft affinity masks to
+all the VCPUs in the domain. The symbol '-' can be used to leave either
+hard or soft affinity alone.
+
+For example:
+
+ xl vcpu-pin 0 3 - 6-9
+
+will set soft affinity for vCPU 3 of domain 0 to pCPUs 6,7,8 and 9,
+leaving its hard affinity untouched. On the other hand:
+
+ xl vcpu-pin 0 3 3,4 6-9
+
+will set both hard and soft affinity, the former to pCPUs 3 and 4, the
+latter to pCPUs 6,7,8, and 9.
+
+Specifying I<-f> or I<--force> will remove a temporary pinning done by the
+operating system (normally removing such a pinning should be left to the
+operating system itself). While a temporary pinning is active for a vcpu,
+its affinity can't be changed without this option.
+
+=item B<vm-list>
+
+Prints information about guests. This list excludes information about
+service or auxiliary domains such as dom0 and stubdoms.
+
+B<EXAMPLE>
+
+An example format for the list is as follows:
+
+ UUID ID name
+ 59e1cf6c-6ab9-4879-90e7-adc8d1c63bf5 2 win
+ 50bc8f75-81d0-4d53-b2e6-95cb44e2682e 3 linux
+
+=item B<vncviewer> [I<OPTIONS>] I<domain-id>
+
+Attach to domain's VNC server, forking a vncviewer process.
+
+B<OPTIONS>
+
+=over 4
+
+=item I<--autopass>
+
+Pass VNC password to vncviewer via stdin.
+
+=back
+
+=back
+
+=head1 XEN HOST SUBCOMMANDS
+
+=over 4
+
+=item B<debug-keys> I<keys>
+
+Send debug I<keys> to Xen. It is the same as pressing the Xen
+"conswitch" (Ctrl-A by default) three times and then pressing "keys".
+
+=item B<dmesg> [B<-c>]
+
+Reads the Xen message buffer, similar to dmesg on a Linux system. The
+buffer contains informational, warning, and error messages created
+during Xen's boot process. If you are having problems with Xen, this
+is one of the first places to look as part of problem determination.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-c>, B<--clear>
+
+Clears Xen's message buffer.
+
+=back
+
+=item B<info> [B<-n>, B<--numa>]
+
+Print information about the Xen host in I<name : value> format. When
+reporting a Xen bug, please provide this information as part of the
+bug report. See I<http://wiki.xen.org/xenwiki/ReportingBugs> on how to
+report Xen bugs.
+
+Sample output looks as follows:
+
+ host : scarlett
+ release : 3.1.0-rc4+
+ version : #1001 SMP Wed Oct 19 11:09:54 UTC 2011
+ machine : x86_64
+ nr_cpus : 4
+ nr_nodes : 1
+ cores_per_socket : 4
+ threads_per_core : 1
+ cpu_mhz : 2266
+ hw_caps : bfebfbff:28100800:00000000:00003b40:009ce3bd:00000000:00000001:00000000
+ virt_caps : hvm hvm_directio
+ total_memory : 6141
+ free_memory : 4274
+ free_cpus : 0
+ outstanding_claims : 0
+ xen_major : 4
+ xen_minor : 2
+ xen_extra : -unstable
+ xen_caps : xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64
+ xen_scheduler : credit
+ xen_pagesize : 4096
+ platform_params : virt_start=0xffff800000000000
+ xen_changeset : Wed Nov 02 17:09:09 2011 +0000 24066:54a5e994a241
+ xen_commandline : com1=115200,8n1 guest_loglvl=all dom0_mem=750M console=com1
+ cc_compiler : gcc version 4.4.5 (Debian 4.4.5-8)
+ cc_compile_by : sstabellini
+ cc_compile_domain : uk.xensource.com
+ cc_compile_date : Tue Nov 8 12:03:05 UTC 2011
+ xend_config_format : 4
+
+B<FIELDS>
+
+Not all fields will be explained here, but some of the less obvious
+ones deserve explanation:
+
+=over 4
+
+=item B<hw_caps>
+
+A vector showing what hardware capabilities are supported by your
+processor. This is equivalent to, though more cryptic than, the flags
+field in /proc/cpuinfo on a normal Linux machine: they both derive from
+the feature bits returned by the cpuid command on x86 platforms.
+
+=item B<free_memory>
+
+Available memory (in MB) not allocated to Xen, or any other domains, or
+claimed for domains.
+
+=item B<outstanding_claims>
+
+When a claim call is done (see L<xl.conf>) a reservation for a specific
+amount of pages is set and also a global value is incremented. This
+global value (outstanding_claims) is then reduced as the domain's memory
+is populated and eventually reaches zero. Most of the time the value will
+be zero, but if you are launching multiple guests, and B<claim_mode> is
+enabled, this value can increase or decrease. Note that the value also
+affects B<free_memory>, as it reflects the free memory in the
+hypervisor minus the outstanding pages claimed for guests.
+See the B<claims> subcommand for a detailed listing.
+
+=item B<xen_caps>
+
+The Xen version and architecture. Architecture values can be one of:
+x86_32, x86_32p (i.e. PAE enabled), x86_64, ia64.
+
+=item B<xen_changeset>
+
+The Xen mercurial changeset id. Very useful for determining exactly
+what version of code your Xen system was built from.
+
+=back
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-n>, B<--numa>
+
+List host NUMA topology information
+
+=back
+
+=item B<top>
+
+Executes the B<xentop> command, which provides real time monitoring of
+domains. Xentop is a curses interface, and reasonably
+self-explanatory.
+
+=item B<uptime>
+
+Prints the current uptime of the domains running.
+
+=item B<claims>
+
+Prints information about outstanding claims by the guests. This provides
+the outstanding claims and currently populated memory count for the guests.
+These values, added together, reflect the global outstanding claim value,
+which is reported by B<xl info> as B<outstanding_claims>.
+The B<Mem> column shows the sum of the outstanding claims and
+the amount of memory currently allocated to the guest.
+
+B<EXAMPLE>
+
+An example format for the list is as follows:
+
+ Name ID Mem VCPUs State Time(s) Claimed
+ Domain-0 0 2047 4 r----- 19.7 0
+ OL5 2 2048 1 --p--- 0.0 847
+ OL6 3 1024 4 r----- 5.9 0
+ Windows_XP 4 2047 1 --p--- 0.0 1989
+
+Here it can be seen that the OL5 guest still has 847MB of claimed
+memory (out of the total 2048MB where 1191MB has been allocated to
+the guest).
+
+=back
+
+=head1 SCHEDULER SUBCOMMANDS
+
+Xen ships with a number of domain schedulers, which can be set at boot
+time with the B<sched=> parameter on the Xen command line. By
+default B<credit> is used for scheduling.
+
+=over 4
+
+=item B<sched-credit> [I<OPTIONS>]
+
+Set or get credit scheduler parameters. The credit scheduler is a
+proportional fair share CPU scheduler built from the ground up to be
+work conserving on SMP hosts.
+
+Each domain (including Domain0) is assigned a weight and a cap.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-d DOMAIN>, B<--domain=DOMAIN>
+
+Specify domain for which scheduler parameters are to be modified or retrieved.
+Mandatory for modifying scheduler parameters.
+
+=item B<-w WEIGHT>, B<--weight=WEIGHT>
+
+A domain with a weight of 512 will get twice as much CPU as a domain
+with a weight of 256 on a contended host. Legal weights range from 1
+to 65535 and the default is 256.
+
+=item B<-c CAP>, B<--cap=CAP>
+
+The cap optionally fixes the maximum amount of CPU a domain will be
+able to consume, even if the host system has idle CPU cycles. The cap
+is expressed in percentage of one physical CPU: 100 is 1 physical CPU,
+50 is half a CPU, 400 is 4 CPUs, etc. The default, 0, means there is
+no upper cap.
+
+NB: Many systems have features that will scale down the computing
+power of a cpu that is not 100% utilized. This can be in the
+operating system, but can also sometimes be below the operating system
+in the BIOS. If you set a cap such that individual cores are running
+at less than 100%, this may have an impact on the performance of your
+workload over and above the impact of the cap. For example, if your
+processor runs at 2GHz, and you cap a vm at 50%, the power management
+system may also reduce the clock speed to 1GHz; the effect will be
+that your VM gets 25% of the available power (50% of 1GHz) rather than
+50% (50% of 2GHz). If you are not getting the performance you expect,
+look at performance and cpufreq options in your operating system and
+your BIOS.
+
+=item B<-p CPUPOOL>, B<--cpupool=CPUPOOL>
+
+Restrict output to domains in the specified cpupool.
+
+=item B<-s>, B<--schedparam>
+
+Specify to list or set pool-wide scheduler parameters.
+
+=item B<-t TSLICE>, B<--tslice_ms=TSLICE>
+
+Timeslice tells the scheduler how long to allow VMs to run before
+pre-empting. The default is 30ms. Valid ranges are 1ms to 1000ms.
+The length of the timeslice (in ms) must be higher than the length of
+the ratelimit (see below).
+
+=item B<-r RLIMIT>, B<--ratelimit_us=RLIMIT>
+
+Ratelimit attempts to limit the number of schedules per second. It
+sets a minimum amount of time (in microseconds) a VM must run before
+we will allow a higher-priority VM to pre-empt it. The default value
+is 1000 microseconds (1ms). Valid range is 100 to 500000 (500ms).
+The ratelimit length must be lower than the timeslice length.
+
+=back
+
+B<COMBINATION>
+
+The following is the effect of combining the above options:
+
+=over 4
+
+=item B<E<lt>nothingE<gt>> : List all domain params and sched params from all pools
+
+=item B<-d [domid]> : List domain params for domain [domid]
+
+=item B<-d [domid] [params]> : Set domain params for domain [domid]
+
+=item B<-p [pool]> : list all domains and sched params for [pool]
+
+=item B<-s> : List sched params for poolid 0
+
+=item B<-s [params]> : Set sched params for poolid 0
+
+=item B<-p [pool] -s> : List sched params for [pool]
+
+=item B<-p [pool] -s [params]> : Set sched params for [pool]
+
+=item B<-p [pool] -d>... : Illegal
+
+=back
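+
+For example, giving a (hypothetical) guest twice the default weight
+and capping it at one physical CPU:
+
+ xl sched-credit -d myguest -w 512 -c 100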
+
+=item B<sched-credit2> [I<OPTIONS>]
+
+Set or get credit2 scheduler parameters. The credit2 scheduler is a
+proportional fair share CPU scheduler built from the ground up to be
+work conserving on SMP hosts.
+
+Each domain (including Domain0) is assigned a weight.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-d DOMAIN>, B<--domain=DOMAIN>
+
+Specify domain for which scheduler parameters are to be modified or retrieved.
+Mandatory for modifying scheduler parameters.
+
+=item B<-w WEIGHT>, B<--weight=WEIGHT>
+
+A domain with a weight of 512 will get twice as much CPU as a domain
+with a weight of 256 on a contended host. Legal weights range from 1
+to 65535 and the default is 256.
+
+=item B<-p CPUPOOL>, B<--cpupool=CPUPOOL>
+
+Restrict output to domains in the specified cpupool.
+
+=back
+
+=item B<sched-rtds> [I<OPTIONS>]
+
+Set or get rtds (Real Time Deferrable Server) scheduler parameters.
+This rt scheduler applies the preemptive Global Earliest Deadline First
+real-time scheduling algorithm to schedule VCPUs in the system.
+Each VCPU has a dedicated period and budget.
+VCPUs in the same domain have the same period and budget.
+While scheduled, a VCPU burns its budget.
+A VCPU has its budget replenished at the beginning of each period;
+unused budget is discarded at the end of each period.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-d DOMAIN>, B<--domain=DOMAIN>
+
+Specify domain for which scheduler parameters are to be modified or retrieved.
+Mandatory for modifying scheduler parameters.
+
+=item B<-v VCPUID/all>, B<--vcpuid=VCPUID/all>
+
+Specify vcpu for which scheduler parameters are to be modified or retrieved.
+
+=item B<-p PERIOD>, B<--period=PERIOD>
+
+Period of time, in microseconds, over which to replenish the budget.
+
+=item B<-b BUDGET>, B<--budget=BUDGET>
+
+Amount of time, in microseconds, that the VCPU will be allowed
+to run every period.
+
+=item B<-c CPUPOOL>, B<--cpupool=CPUPOOL>
+
+Restrict output to domains in the specified cpupool.
+
+=back
+
+B<EXAMPLE>
+
+=over 4
+
+1) Use B<-v all> to see the budget and period of all the VCPUs of
+all the domains:
+
+ xl sched-rtds -v all
+ Cpupool Pool-0: sched=RTDS
+ Name ID VCPU Period Budget
+ Domain-0 0 0 10000 4000
+ vm1 1 0 300 150
+ vm1 1 1 400 200
+ vm1 1 2 10000 4000
+ vm1 1 3 1000 500
+ vm2 2 0 10000 4000
+ vm2 2 1 10000 4000
+
+Without any arguments, it will output the default scheduling
+parameters for each domain:
+
+ xl sched-rtds
+ Cpupool Pool-0: sched=RTDS
+ Name ID Period Budget
+ Domain-0 0 10000 4000
+ vm1 1 10000 4000
+ vm2 2 10000 4000
+
+
+2) Use, for instance, B<-d vm1 -v all> to see the budget and
+period of all VCPUs of a specific domain (B<vm1>):
+
+ xl sched-rtds -d vm1 -v all
+ Name ID VCPU Period Budget
+ vm1 1 0 300 150
+ vm1 1 1 400 200
+ vm1 1 2 10000 4000
+ vm1 1 3 1000 500
+
+To see the parameters of a subset of the VCPUs of a domain, use:
+
+ xl sched-rtds -d vm1 -v 0 -v 3
+ Name ID VCPU Period Budget
+ vm1 1 0 300 150
+ vm1 1 3 1000 500
+
+If no B<-v> is specified, the default scheduling parameters for the
+domain are shown:
+
+ xl sched-rtds -d vm1
+ Name ID Period Budget
+ vm1 1 10000 4000
+
+
+3) Users can set the budget and period of multiple VCPUs of a
+specific domain with only one command,
+e.g., "xl sched-rtds -d vm1 -v 0 -p 100 -b 50 -v 3 -p 300 -b 150".
+
+To change the parameters of all the VCPUs of a domain, use B<-v all>,
+e.g., "xl sched-rtds -d vm1 -v all -p 500 -b 250".
+
+=back
+
+=back
+
+=head1 CPUPOOLS COMMANDS
+
+Xen can group the physical cpus of a server into cpu-pools. Each physical
+CPU is assigned to at most one cpu-pool. Domains are each restricted to a
+single cpu-pool. Scheduling does not cross cpu-pool boundaries, so each
+cpu-pool has its own scheduler.
+Physical cpus and domains can be moved from one cpu-pool to another only by an
+explicit command.
+Cpu-pools can be specified either by name or by id.
+
+=over 4
+
+=item B<cpupool-create> [I<OPTIONS>] [I<ConfigFile>] [I<Variable=Value> ...]
+
+Create a cpu pool based on a config from I<ConfigFile> or command-line
+parameters. Variable settings from the I<ConfigFile> may be altered
+by specifying new or additional assignments on the command line.
+
+See the L<xlcpupool.cfg(5)> manpage for more information.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-f=FILE>, B<--defconfig=FILE>
+
+Use the given configuration file.
+
+=back
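+
+A minimal sketch, with a (hypothetical) configuration file using the
+keys described in L<xlcpupool.cfg(5)>:
+
+ # pool1.cfg
+ name = "pool1"
+ sched = "credit"
+ cpus = ["4", "5"]
+
+ xl cpupool-create pool1.cfg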
+
+=item B<cpupool-list> [I<-c|--cpus>] [I<cpu-pool>]
+
+List CPU pools on the host.
+If I<-c> is specified, B<xl> prints a list of CPUs used by I<cpu-pool>.
+
+=item B<cpupool-destroy> I<cpu-pool>
+
+Deactivates a cpu pool.
+This is possible only if no domain is active in the cpu-pool.
+
+=item B<cpupool-rename> I<cpu-pool> I<newname>
+
+Renames a cpu-pool to I<newname>.
+
+=item B<cpupool-cpu-add> I<cpu-pool> I<cpus|node:nodes>
+
+Adds one or more CPUs or NUMA nodes to I<cpu-pool>. CPUs and NUMA
+nodes can be specified as single CPU/node IDs or as ranges.
+
+For example:
+
+ (a) xl cpupool-cpu-add mypool 4
+ (b) xl cpupool-cpu-add mypool 1,5,10-16,^13
+ (c) xl cpupool-cpu-add mypool node:0,nodes:2-3,^10-12,8
+
+That is: (a) adds CPU 4 to mypool; (b) adds CPUs 1, 5, 10, 11, 12,
+14, 15 and 16; and (c) adds all the CPUs of NUMA nodes 0, 2 and 3,
+plus CPU 8, while leaving out CPUs 10, 11 and 12.
+
+All the specified CPUs that can be added to the cpupool will be added
+to it. If some CPUs can't be (e.g., because they're already part of another
+cpupool), an error is reported for each of them.
+
+=item B<cpupool-cpu-remove> I<cpu-pool> I<cpus|node:nodes>
+
+Removes one or more CPUs or NUMA nodes from I<cpu-pool>. CPUs and NUMA
+nodes can be specified as single CPU/node IDs or as ranges, using the
+exact same syntax as in B<cpupool-cpu-add> above.
+
+=item B<cpupool-migrate> I<domain> I<cpu-pool>
+
+Moves a domain specified by domain-id or domain-name into a cpu-pool.
+Domain-0 can't be moved to another cpu-pool.
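+
+For instance, to move a hypothetical domain B<vm1> into a pool named
+B<mypool>:
+
+ xl cpupool-migrate vm1 mypool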
+
+=item B<cpupool-numa-split>
+
+Splits up the machine into one cpu-pool per NUMA node.
+
+=back
+
+=head1 VIRTUAL DEVICE COMMANDS
+
+Most virtual devices can be added and removed while guests are
+running, assuming that the necessary support exists in the guest. The
+effect on the guest OS is much the same as any hotplug event.
+
+=head2 BLOCK DEVICES
+
+=over 4
+
+=item B<block-attach> I<domain-id> I<disc-spec-component(s)> ...
+
+Create a new virtual block device. This will trigger a hotplug event
+for the guest.
+
+Note that only PV block devices are supported by block-attach.
+Requests to attach emulated devices (e.g., vdev=hdc) will result in only
+the PV view being available to the guest.
+
+B<OPTIONS>
+
+=over 4
+
+=item I<domain-id>
+
+The domain id of the guest domain that the device will be attached to.
+
+=item I<disc-spec-component>
+
+A disc specification in the same format used for the B<disk> variable in
+the domain config file. See
+L<http://xenbits.xen.org/docs/unstable/misc/xl-disk-configuration.txt>.
+
+=back
+
+=item B<block-detach> I<domain-id> I<devid> [B<--force>]
+
+Detach a domain's virtual block device. I<devid> may be the symbolic
+name or the numeric device id given to the device by domain 0. You
+will need to run B<xl block-list> to determine that number.
+
+Detaching the device requires the cooperation of the domain. If the
+domain fails to release the device (perhaps because the domain is hung
+or is still using the device), the detach will fail. The B<--force>
+parameter will forcefully detach the device, but may cause IO errors
+in the domain.
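+
+For instance, assuming a hypothetical guest B<vm1> whose B<xl block-list>
+output shows a device with id B<51712>:
+
+ xl block-list vm1
+ xl block-detach vm1 51712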
+
+=item B<block-list> I<domain-id>
+
+List virtual block devices for a domain.
+
+=item B<cd-insert> I<domain-id> I<VirtualDevice> I<target>
+
+Insert a cdrom into a guest domain's existing virtual cd drive. The
+virtual drive must already exist but can be empty.
+
+Only works with HVM domains.
+
+B<OPTIONS>
+
+=over 4
+
+=item I<VirtualDevice>
+
+How the device should be presented to the guest domain; for example "hdc".
+
+=item I<target>
+
+The target path in the backend domain (usually domain 0) to be
+exported; this can be a block device, a file, etc. See B<target> in
+F<docs/misc/xl-disk-configuration.txt>.
+
+=back
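+
+For instance, assuming a hypothetical HVM guest B<vm1> and an ISO image
+at F</path/to/dvd.iso>:
+
+ xl cd-insert vm1 hdc /path/to/dvd.iso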
+
+=item B<cd-eject> I<domain-id> I<VirtualDevice>
+
+Eject a cdrom from a guest's virtual cd drive. Only works with HVM domains.
+
+B<OPTIONS>
+
+=over 4
+
+=item I<VirtualDevice>
+
+How the device should be presented to the guest domain; for example "hdc".
+
+=back
+
+=back
+
+=head2 NETWORK DEVICES
+
+=over 4
+
+=item B<network-attach> I<domain-id> I<network-device>
+
+Creates a new network device in the domain specified by I<domain-id>.
+I<network-device> describes the device to attach, using the same format as the
+B<vif> string in the domain config file. See L<xl.cfg> and
+L<http://xenbits.xen.org/docs/unstable/misc/xl-network-configuration.html>
+for more information.
+
+Note that only attaching PV network interfaces is supported.
+
+=item B<network-detach> I<domain-id> I<devid|mac>
+
+Removes the network device from the domain specified by I<domain-id>.
+I<devid> is the virtual interface device number within the domain
+(i.e. the 3 in vif22.3). Alternatively the I<mac> address can be used to
+select the virtual interface to detach.
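+
+For instance, assuming a hypothetical guest B<vm1>, a bridge B<xenbr0>
+(the B<bridge> key is described in the network configuration document
+above) and virtual interface number 3:
+
+ xl network-attach vm1 bridge=xenbr0
+ xl network-detach vm1 3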
+
+=item B<network-list> I<domain-id>
+
+List virtual network interfaces for a domain.
+
+=back
+
+=head2 CHANNEL DEVICES
+
+=over 4
+
+=item B<channel-list> I<domain-id>
+
+List virtual channel interfaces for a domain.
+
+=back
+
+=head2 VTPM DEVICES
+
+=over 4
+
+=item B<vtpm-attach> I<domain-id> I<vtpm-device>
+
+Creates a new vtpm device in the domain specified by I<domain-id>.
+I<vtpm-device> describes the device to attach, using the same format as the
+B<vtpm> string in the domain config file. See L<xl.cfg> for
+more information.
+
+=item B<vtpm-detach> I<domain-id> I<devid|uuid>
+
+Removes the vtpm device from the domain specified by I<domain-id>.
+I<devid> is the numeric device id given to the virtual trusted
+platform module device. You will need to run B<xl vtpm-list> to determine that number.
+Alternatively the I<uuid> of the vtpm can be used to
+select the virtual device to detach.
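+
+For instance, assuming a hypothetical guest B<vm1> whose B<xl vtpm-list>
+output shows a vtpm with devid B<0>:
+
+ xl vtpm-list vm1
+ xl vtpm-detach vm1 0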
+
+=item B<vtpm-list> I<domain-id>
+
+List virtual trusted platform modules for a domain.
+
+=back
+
+=head1 PCI PASS-THROUGH
+
+=over 4
+
+=item B<pci-assignable-list>
+
+List all the assignable PCI devices.
+These are devices in the system which are configured to be
+available for passthrough and are bound to a suitable PCI
+backend driver in domain 0 rather than a real driver.
+
+=item B<pci-assignable-add> I<BDF>
+
+Make the device at PCI Bus/Device/Function BDF assignable to guests.
+This will bind the device to the pciback driver. If it is already
+bound to a driver, it will first be unbound, and the original driver
+stored so that it can be re-bound to the same driver later if desired.
+If the device is already bound to pciback, it will return success.
+
+CAUTION: This will make the device unusable by Domain 0 until it is
+returned with pci-assignable-remove. Care should therefore be taken
+not to do this on a device critical to domain 0's operation, such as
+storage controllers, network interfaces, or GPUs that are currently
+being used.
+
+=item B<pci-assignable-remove> [I<-r>] I<BDF>
+
+Make the device at PCI Bus/Device/Function BDF no longer assignable to guests. This
+will at least unbind the device from pciback. If the -r option is specified,
+it will also attempt to re-bind the device to its original driver, making it
+usable by Domain 0 again. If the device is not bound to pciback, it will
+return success.
+
+=item B<pci-attach> I<domain-id> I<BDF>
+
+Hot-plug a new pass-through pci device to the specified domain.
+B<BDF> is the PCI Bus/Device/Function of the physical device to pass-through.
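+
+For instance, assuming a hypothetical device at B<BDF> B<04:00.0> and a
+guest B<vm1>, the usual flow is to make the device assignable and then
+attach it:
+
+ xl pci-assignable-add 04:00.0
+ xl pci-attach vm1 04:00.0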
+
+=item B<pci-detach> [I<-f>] I<domain-id> I<BDF>
+
+Hot-unplug a previously assigned pci device from a domain. B<BDF> is the PCI
+Bus/Device/Function of the physical device to be removed from the guest domain.
+
+If B<-f> is specified, B<xl> will forcefully remove the device even
+without the guest's cooperation.
+
+=item B<pci-list> I<domain-id>
+
+List pass-through pci devices for a domain.
+
+=back
+
+=head1 USB PASS-THROUGH
+
+=over 4
+
+=item B<usbctrl-attach> I<domain-id> I<usbctrl-device>
+
+Create a new USB controller in the domain specified by I<domain-id>.
+I<usbctrl-device> describes the device to attach, using the form
+C<KEY=VALUE KEY=VALUE ...>, where each B<KEY=VALUE> has the same
+meaning as in the B<usbctrl> description in the domain config file.
+See L<xl.cfg> for more information.
+
+=item B<usbctrl-detach> I<domain-id> I<devid>
+
+Destroy the USB controller with the given B<devid> in the specified
+domain.
+
+=item B<usbdev-attach> I<domain-id> I<usbdev-device>
+
+Hot-plug a new pass-through USB device to the domain specified by
+I<domain-id>. I<usbdev-device> describes the device to attach, using the
+form C<KEY=VALUE KEY=VALUE ...>, where each B<KEY=VALUE> has the same
+meaning as in the B<usbdev> description in the domain config file.
+See L<xl.cfg> for more information.
+
+=item B<usbdev-detach> I<domain-id> I<controller=devid> I<port=number>
+
+Hot-unplug a previously assigned USB device from a domain.
+B<controller=devid> and B<port=number> identify the USB controller and
+the controller port in the guest to which the USB device is attached.
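+
+For instance, assuming a hypothetical guest B<vm1> and the B<hostbus>
+and B<hostaddr> keys from the B<usbdev> description in L<xl.cfg>:
+
+ xl usbctrl-attach vm1
+ xl usbdev-attach vm1 hostbus=1 hostaddr=3
+ xl usbdev-detach vm1 controller=0 port=1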
+
+=item B<usb-list> I<domain-id>
+
+List pass-through usb devices for a domain.
+
+=back
+
+=head1 TMEM
+
+=over 4
+
+=item B<tmem-list> [I<-l>] I<domain-id>
+
+List tmem pools. If I<-l> is specified, also list tmem stats.
+
+=item B<tmem-freeze> I<domain-id>
+
+Freeze tmem pools.
+
+=item B<tmem-thaw> I<domain-id>
+
+Thaw tmem pools.
+
+=item B<tmem-set> I<domain-id> [I<OPTIONS>]
+
+Change tmem settings.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-w> I<WEIGHT>
+
+Weight (int)
+
+=item B<-c> I<CAP>
+
+Cap (int)
+
+=item B<-p> I<COMPRESS>
+
+Compress (int)
+
+=back
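+
+For instance, to set a hypothetical weight of 100 for a guest B<vm1>:
+
+ xl tmem-set vm1 -w 100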
+
+=item B<tmem-shared-auth> I<domain-id> [I<OPTIONS>]
+
+De/authenticate shared tmem pool.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-u> I<UUID>
+
+Specify uuid (abcdef01-2345-6789-1234-567890abcdef)
+
+=item B<-a> I<AUTH>
+
+0=auth,1=deauth
+
+=back
+
+=item B<tmem-freeable>
+
+Get information about how much freeable memory (MB) is in use by tmem.
+
+=back
+
+=head1 FLASK
+
+B<FLASK> is a security framework that defines a mandatory access control policy
+providing fine-grained controls over Xen domains, allowing the policy writer
+to define what interactions between domains, devices, and the hypervisor are
+permitted. Some examples of what you can do using XSM/FLASK:
+ - Prevent two domains from communicating via event channels or grants
+ - Control which domains can use device passthrough (and which devices)
+ - Restrict or audit operations performed by privileged domains
+ - Prevent a privileged domain from arbitrarily mapping pages from other
+ domains.
+
+You can find more details on how to use FLASK and an example security
+policy here: L<http://xenbits.xen.org/docs/unstable/misc/xsm-flask.txt>
+
+=over 4
+
+=item B<getenforce>
+
+Determine if the FLASK security module is loaded and enforcing its policy.
+
+=item B<setenforce> I<1|0|Enforcing|Permissive>
+
+Enable or disable enforcing of the FLASK access controls. The default is
+permissive, but this can be changed to enforcing by specifying "flask=enforcing"
+or "flask=late" on the hypervisor's command line.
+
+=item B<loadpolicy> I<policy-file>
+
+Load FLASK policy from the given policy file. The initial policy is provided to
+the hypervisor as a multiboot module; this command allows runtime updates to the
+policy. Loading new security policy will reset runtime changes to device labels.
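+
+For instance, to check the current mode, switch to enforcing, and load
+a hypothetical policy file F</boot/xenpolicy>:
+
+ xl getenforce
+ xl setenforce Enforcing
+ xl loadpolicy /boot/xenpolicy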
+
+=back
+
+=head1 PLATFORM SHARED RESOURCE MONITORING/CONTROL
+
+Intel Haswell and later server platforms offer shared resource monitoring
+and control technologies. The availability of these technologies and the
+hardware capabilities can be shown with B<psr-hwinfo>.
+
+See L<http://xenbits.xen.org/docs/unstable/misc/xl-psr.html> for more
+information.
+
+=over 4
+
+=item B<psr-hwinfo> [I<OPTIONS>]
+
+Show Platform Shared Resource (PSR) hardware information.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-m>, B<--cmt>
+
+Show Cache Monitoring Technology (CMT) hardware information.
+
+=item B<-a>, B<--cat>
+
+Show Cache Allocation Technology (CAT) hardware information.
+
+=back
+
+=back
+
+=head2 CACHE MONITORING TECHNOLOGY
+
+Intel Haswell and later server platforms offer monitoring capability in each
+logical processor to measure specific platform shared resource metrics, for
+example, L3 cache occupancy. In the Xen implementation, the monitoring
+granularity is the domain level. To monitor a specific domain, attach the
+domain id to the monitoring service. When the domain no longer needs to be
+monitored, detach the domain id from the monitoring service.
+
+Intel Broadwell and later server platforms also offer total/local memory
+bandwidth monitoring. Xen supports per-domain monitoring for these two
+additional monitoring types. Both memory bandwidth monitoring and L3 cache
+occupancy monitoring share the same underlying monitoring service. Once
+a domain is attached to the monitoring service, monitoring data can be shown
+for any of these monitoring types.
+
+=over 4
+
+=item B<psr-cmt-attach> [I<domain-id>]
+
+Attach the platform shared resource monitoring service to a domain.
+
+=item B<psr-cmt-detach> [I<domain-id>]
+
+Detach the platform shared resource monitoring service from a domain.
+
+=item B<psr-cmt-show> [I<psr-monitor-type>] [I<domain-id>]
+
+Show monitoring data for a certain domain or all domains. Currently
+supported monitor types are:
+ - "cache-occupancy": showing the L3 cache occupancy (KB).
+ - "total-mem-bandwidth": showing the total memory bandwidth (KB/s).
+ - "local-mem-bandwidth": showing the local memory bandwidth (KB/s).
+
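+For instance, to start monitoring a hypothetical domain B<vm1> and
+display its L3 cache occupancy:
+
+ xl psr-cmt-attach vm1
+ xl psr-cmt-show cache-occupancy vm1
+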
+=back
+
+=head2 CACHE ALLOCATION TECHNOLOGY
+
+Intel Broadwell and later server platforms offer capabilities to configure and
+make use of the Cache Allocation Technology (CAT) mechanisms, which enable more
+cache resources (i.e., the L3 cache) to be made available for high priority
+applications. In the Xen implementation, CAT is used to control cache allocation
+on a per-VM basis. To enforce cache allocation for a specific domain, set the
+capacity bitmasks (CBM) for that domain.
+
+Intel Broadwell and later server platforms also offer Code/Data
+Prioritization (CDP) for cache allocations, which supports specifying
+separate code and data caches for applications. In the Xen implementation,
+CDP is used on a per-VM basis. To specify a code or data CBM for a domain,
+the CDP feature must be enabled and a CBM type option must be given when
+setting the CBM; the type options (code and data) are mutually exclusive.
+
+=over 4
+
+=item B<psr-cat-cbm-set> [I<OPTIONS>] I<domain-id> I<cbm>
+
+Set cache capacity bitmasks (CBM) for a domain. For how to specify I<cbm>
+please refer to L<http://xenbits.xen.org/docs/unstable/misc/xl-psr.html>.
+
+B<OPTIONS>
+
+=over 4
+
+=item B<-s SOCKET>, B<--socket=SOCKET>
+
+Specify the socket to process, otherwise all sockets are processed.
+
+=item B<-c>, B<--code>
+
+Set code CBM when CDP is enabled.
+
+=item B<-d>, B<--data>
+
+Set data CBM when CDP is enabled.
+
+=back
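+
+For instance, assuming a hypothetical bitmask B<0xff> (see the document
+above for how to construct a valid I<cbm>) on socket 0 of a guest B<vm1>:
+
+ xl psr-cat-cbm-set -s 0 vm1 0xff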
+
+=item B<psr-cat-show> [I<domain-id>]
+
+Show CAT settings for a certain domain or all domains.
+
+=back
+
+=head1 IGNORED FOR COMPATIBILITY WITH XM
+
+xl is mostly command-line compatible with the old xm utility used with
+the old Python xend. For compatibility, the following options are
+ignored:
+
+=over 4
+
+=item B<xl migrate --live>
+
+=back
+
+=head1 TO BE DOCUMENTED
+
+We need better documentation for:
+
+=over 4
+
+=item B<tmem>
+
+Transcendent Memory.
+
+=back
+
+=head1 SEE ALSO
+
+The following man pages:
+
+L<xl.cfg>(5), L<xlcpupool.cfg>(5), B<xentop>(1)
+
+And the following documents on the xen.org website:
+
+L<http://xenbits.xen.org/docs/unstable/misc/xl-network-configuration.html>
+L<http://xenbits.xen.org/docs/unstable/misc/xl-disk-configuration.txt>
+L<http://xenbits.xen.org/docs/unstable/misc/xsm-flask.txt>
+L<http://xenbits.xen.org/docs/unstable/misc/xl-psr.html>
+
+For systems that don't automatically bring CPUs online:
+
+L<http://wiki.xen.org/wiki/Paravirt_Linux_CPU_Hotplug>
+
+=head1 BUGS
+
+Send bugs to xen-devel@lists.xen.org, see
+http://wiki.xen.org/xenwiki/ReportingBugs on how to send bug reports.