tools/libs/light/test_fdderegrace
tools/libs/light/tmp.*
tools/libs/light/xenlight.pc
-tools/libs/light/include/_*.h
tools/libs/stat/_paths.h
tools/libs/stat/headers.chk
tools/libs/stat/libxenstat.map
tools/libs/store/utils.h
tools/libs/store/xenstore.pc
tools/libs/store/xs_lib.c
-tools/libs/store/include/xenstore_lib.h
tools/libs/util/*.pc
tools/libs/util/_paths.h
tools/libs/util/libxlu_cfg_y.output
tools/hotplug/NetBSD/rc.d/xencommons
tools/hotplug/NetBSD/rc.d/xendriverdomain
tools/include/acpi
-tools/include/*.h
+tools/include/_libxl*.h
+tools/include/_xentoolcore_list.h
tools/include/xen/*
tools/include/xen-xsm/*
tools/include/xen-foreign/*.(c|h|size)
# XEN_ROOT
# MINIOS_TARGET_ARCH
-XENSTORE_CPPFLAGS = -isystem $(XEN_ROOT)/tools/libs/store/include
+XENSTORE_CPPFLAGS = -isystem $(XEN_ROOT)/tools/include
TOOLCORE_PATH = $(XEN_ROOT)/stubdom/libs-$(MINIOS_TARGET_ARCH)/toolcore
TOOLLOG_PATH = $(XEN_ROOT)/stubdom/libs-$(MINIOS_TARGET_ARCH)/toollog
EVTCHN_PATH = $(XEN_ROOT)/stubdom/libs-$(MINIOS_TARGET_ARCH)/evtchn
define LIB_defs
FILENAME_$(1) ?= xen$(1)
XEN_libxen$(1) = $$(XEN_ROOT)/tools/libs/$(1)
- CFLAGS_libxen$(1) = -I$$(XEN_libxen$(1))/include $$(CFLAGS_xeninclude)
+ CFLAGS_libxen$(1) = $$(CFLAGS_xeninclude)
SHDEPS_libxen$(1) = $$(foreach use,$$(USELIBS_$(1)),$$(SHLIB_libxen$$(use)))
LDLIBS_libxen$(1) = $$(SHDEPS_libxen$(1)) $$(XEN_libxen$(1))/lib$$(FILENAME_$(1))$$(libextension)
SHLIB_libxen$(1) = $$(SHDEPS_libxen$(1)) -Wl,-rpath-link=$$(XEN_libxen$(1))
# code which compiles against libxenctrl get __XEN_TOOLS__ and
# therefore sees the unstable hypercall interfaces.
-CFLAGS_libxenctrl += $(CFLAGS_libxentoollog) $(CFLAGS_libxenforeignmemory) $(CFLAGS_libxendevicemodel) -D__XEN_TOOLS__
-CFLAGS_libxenguest += $(CFLAGS_libxenevtchn) $(CFLAGS_libxenforeignmemory)
+CFLAGS_libxenctrl += -D__XEN_TOOLS__
ifeq ($(CONFIG_Linux),y)
LDLIBS_libxenstore += -ldl
--- /dev/null
+/**
+ * @file
+ * @section AUTHORS
+ *
+ * Copyright (C) 2010 Rafal Wojtczuk <rafal@invisiblethingslab.com>
+ *
+ * Authors:
+ * Rafal Wojtczuk <rafal@invisiblethingslab.com>
+ * Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ *
+ * @section LICENSE
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * @section DESCRIPTION
+ *
+ * Originally borrowed from the Qubes OS Project, http://www.qubes-os.org,
+ * this code has been substantially rewritten to use the gntdev and gntalloc
+ * devices instead of raw MFNs and map_foreign_range.
+ *
+ * This is a library for inter-domain communication. A standard Xen ring
+ * buffer is used, with a datagram-based interface built on top. The grant
+ * reference and event channels are shared in XenStore under the path
+ * /local/domain/<srv-id>/data/vchan/<cli-id>/<port>/{ring-ref,event-channel}
+ *
+ * The ring.h macros define an asymmetric interface to a shared data structure
+ * that assumes all rings reside in a single contiguous memory space. This is
+ * not suitable for vchan because the interface to the ring is symmetric except
+ * for the setup. Unlike the producer-consumer rings defined in ring.h, the
+ * size of the rings used in vchan are determined at execution time instead of
+ * compile time, so the macros in ring.h cannot be used to access the rings.
+ */
+
+#include <xen/io/libxenvchan.h>
+#include <xen/xen.h>
+#include <xen/sys/evtchn.h>
+#include <xenevtchn.h>
+#include <xengnttab.h>
+
+/* Callers who don't care don't need to #include <xentoollog.h> */
+struct xentoollog_logger;
+
+struct libxenvchan_ring {
+ /* Pointer into the shared page. Offsets into buffer. */
+ struct ring_shared* shr;
+ /* ring data; may be its own shared page(s) depending on order */
+ void* buffer;
+ /**
+ * The size of the ring is (1 << order); offsets wrap around when they
+ * exceed this. This copy is required because we can't trust the order
+ * in the shared page to remain constant.
+ */
+ int order;
+};
+
+/**
+ * struct libxenvchan: control structure passed to all library calls
+ */
+struct libxenvchan {
+ /* Mapping handle for shared ring page */
+ union {
+ xengntshr_handle *gntshr; /* for server */
+ xengnttab_handle *gnttab; /* for client */
+ };
+ /* Pointer to shared ring page */
+ struct vchan_interface *ring;
+ /* event channel interface */
+ xenevtchn_handle *event;
+ uint32_t event_port;
+ /* informative flags: are we acting as server? */
+ int is_server:1;
+ /* true if server remains active when client closes (allows reconnection) */
+ int server_persist:1;
+ /* true if operations should block instead of returning 0 */
+ int blocking:1;
+ /* communication rings */
+ struct libxenvchan_ring read, write;
+};
+
+/**
+ * Set up a vchan, including granting pages
+ * @param logger Logger for libxc errors
+ * @param domain The peer domain that will be connecting
+ * @param xs_path Base xenstore path for storing ring/event data
+ * @param read_min The minimum size (in bytes) of the receive ring (right)
+ * @param write_min The minimum size (in bytes) of the send ring (left)
+ * @return The structure, or NULL in case of an error
+ */
+struct libxenvchan *libxenvchan_server_init(struct xentoollog_logger *logger,
+ int domain, const char* xs_path,
+ size_t read_min, size_t write_min);
+/**
+ * Connect to an existing vchan. Note: you can reconnect to an existing vchan
+ * safely, however no locking is performed, so you must prevent multiple clients
+ * from connecting to a single server.
+ *
+ * @param logger Logger for libxc errors
+ * @param domain The peer domain to connect to
+ * @param xs_path Base xenstore path for storing ring/event data
+ * @return The structure, or NULL in case of an error
+ */
+struct libxenvchan *libxenvchan_client_init(struct xentoollog_logger *logger,
+ int domain, const char* xs_path);
+/**
+ * Close a vchan. This deallocates the vchan and attempts to free its
+ * resources. The other side is notified of the close, but can still read any
+ * data pending prior to the close.
+ */
+void libxenvchan_close(struct libxenvchan *ctrl);
+
+/**
+ * Packet-based receive: always reads exactly $size bytes.
+ * @param ctrl The vchan control structure
+ * @param data Buffer for data that was read
+ * @param size Size of the buffer and amount of data to read
+ * @return -1 on error, 0 if nonblocking and insufficient data is available, or $size
+ */
+int libxenvchan_recv(struct libxenvchan *ctrl, void *data, size_t size);
+/**
+ * Stream-based receive: reads as much data as possible.
+ * @param ctrl The vchan control structure
+ * @param data Buffer for data that was read
+ * @param size Size of the buffer
+ * @return -1 on error, otherwise the amount of data read (which may be zero if
+ * the vchan is nonblocking)
+ */
+int libxenvchan_read(struct libxenvchan *ctrl, void *data, size_t size);
+/**
+ * Packet-based send: send entire buffer if possible
+ * @param ctrl The vchan control structure
+ * @param data Buffer for data to send
+ * @param size Size of the buffer and amount of data to send
+ * @return -1 on error, 0 if nonblocking and insufficient space is available, or $size
+ */
+int libxenvchan_send(struct libxenvchan *ctrl, const void *data, size_t size);
+/**
+ * Stream-based send: send as much data as possible.
+ * @param ctrl The vchan control structure
+ * @param data Buffer for data to send
+ * @param size Size of the buffer
+ * @return -1 on error, otherwise the amount of data sent (which may be zero if
+ * the vchan is nonblocking)
+ */
+int libxenvchan_write(struct libxenvchan *ctrl, const void *data, size_t size);
+/**
+ * Waits for reads or writes to unblock, or for a close
+ */
+int libxenvchan_wait(struct libxenvchan *ctrl);
+/**
+ * Returns the event file descriptor for this vchan. When this FD is readable,
+ * libxenvchan_wait() will not block, and the state of the vchan has changed since
+ * the last invocation of libxenvchan_wait().
+ */
+int libxenvchan_fd_for_select(struct libxenvchan *ctrl);
+/**
+ * Query the state of the vchan shared page:
+ * return 0 when one side has called libxenvchan_close() or crashed
+ * return 1 when both sides are open
+ * return 2 [server only] when no client has yet connected
+ */
+int libxenvchan_is_open(struct libxenvchan* ctrl);
+/** Amount of data ready to read, in bytes */
+int libxenvchan_data_ready(struct libxenvchan *ctrl);
+/** Amount of data it is possible to send without blocking */
+int libxenvchan_buffer_space(struct libxenvchan *ctrl);
--- /dev/null
+/*
+ * Copyright (C) 2009 Citrix Ltd.
+ * Author Vincent Hanquez <vincent.hanquez@eu.citrix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+/*
+ * libxl API compatibility
+ *
+ * From Xen 4.2 onwards the API of libxl will be maintained in a
+ * stable manner. This means that it should be possible to write an
+ * application against the API provided by libxl in Xen 4.2 and expect
+ * that it will continue to compile against future versions of Xen
+ * without source modification.
+ *
+ * In order to make such compatibility possible it is required that
+ * applications which want to be exposed to a particular API #define
+ * LIBXL_API_VERSION before including libxl.h or any other libxl
+ * header. The syntax of the LIBXL_API_VERSION is:
+ * 0xVVSSEE
+ * where ($(XEN_xxx) from xen/Makefile):
+ * VV is the Xen major release number, $(XEN_VERSION)
+ * SS is the Xen sub version number, $(XEN_SUBVERSION)
+ * EE is the Xen extra version digit, first numeric part of
+ * $(XEN_EXTRAVERSION) not including the leading "."
+ * For example the first stable API version, supported by Xen 4.2.0,
+ * is 0x040200.
+ *
+ * Lack of LIBXL_API_VERSION means "the latest" which will
+ * change. Specifying an unknown LIBXL_API_VERSION will result in a
+ * compile time error.
+ *
+ * Identical versions of the libxl API will be represented by the version
+ * containing the earliest instance of that API. e.g. if 4.2.0 and
+ * 4.3.0 contain an identical libxl API then only LIBXL_API_VERSION
+ * 0x040200 will be valid.
+ *
+ * We will try especially hard to avoid changing the API during a
+ * stable series, i.e. it should be unusual for the last byte of
+ * LIBXL_API_VERSION to be non-zero.
+ *
+ * In the event that a change is required which cannot be made
+ * backwards compatible in this manner a #define of the form
+ * LIBXL_HAVE_<interface> will always be added in order to make it
+ * possible to write applications which build against any version of
+ * libxl. Such changes are expected to be exceptional and used as a
+ * last resort. The barrier for backporting such a change to a stable
+ * branch will be very high.
+ *
+ * These guarantees apply only to stable releases of Xen. When an
+ * incompatible change is made in the unstable tree then
+ * LIBXL_API_VERSION will be bumped to the next expected stable
+ * release number on the first such change only. Applications which
+ * want to support building against Xen unstable are expected to track
+ * API changes in that tree until it is released as a stable release.
+ *
+ * API compatibility will be maintained for all versions of Xen using
+ * the same $(XEN_VERSION) (e.g. throughout a major release).
+ */
+
+/* LIBXL_HAVE_PHYSINFO_CAP_PV
+ *
+ * If this is defined, libxl_physinfo has a "cap_pv" field.
+ */
+#define LIBXL_HAVE_PHYSINFO_CAP_PV 1
+
+/* LIBXL_HAVE_CONSOLE_NOTIFY_FD
+ *
+ * If this is defined, libxl_console_exec and
+ * libxl_primary_console_exec take a notify_fd parameter. That
+ * parameter will be used to notify the caller that the console is connected.
+ */
+#define LIBXL_HAVE_CONSOLE_NOTIFY_FD 1
+
+/* LIBXL_HAVE_CONST_COPY_AND_LENGTH_FUNCTIONS
+ *
+ * If this is defined, the copy functions have constified src parameter and the
+ * length functions accept constified parameter.
+ */
+#define LIBXL_HAVE_CONST_COPY_AND_LENGTH_FUNCTIONS 1
+
+/* LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONST_B_INFO
+ *
+ * If this is defined, libxl_domain_need_memory no longer modifies
+ * the b_info passed in.
+ */
+#define LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONST_B_INFO 1
+
+/* LIBXL_HAVE_VNUMA
+ *
+ * If this is defined the type libxl_vnode_info exists, and a
+ * field 'vnuma_nodes' is present in libxl_domain_build_info.
+ */
+#define LIBXL_HAVE_VNUMA 1
+
+/* LIBXL_HAVE_USERDATA_UNLINK
+ *
+ * If it is defined, libxl has a library function called
+ * libxl_userdata_unlink.
+ */
+#define LIBXL_HAVE_USERDATA_UNLINK 1
+
+/* LIBXL_HAVE_CPUPOOL_QUALIFIER_TO_CPUPOOLID
+ *
+ * If this is defined, libxl has a library function called
+ * libxl_cpupool_qualifier_to_cpupoolid, which takes in a CPU pool
+ * qualifier in the form of number or string, then returns the ID of
+ * that CPU pool.
+ */
+#define LIBXL_HAVE_CPUPOOL_QUALIFIER_TO_CPUPOOLID 1
+
+/* LIBXL_HAVE_CPUPOOL_ADD_REM_CPUMAP
+ *
+ * If this is defined, libxl has two library functions called
+ * libxl_cpupool_cpuadd_cpumap and libxl_cpupool_cpuremove_cpumap,
+ * which allow to add to or remove from a cpupool all the cpus
+ * specified in a bitmap.
+ */
+#define LIBXL_HAVE_CPUPOOL_ADD_REM_CPUMAP 1
+
+/*
+ *
+ * LIBXL_HAVE_BITMAP_AND_OR
+ *
+ * If this is defined, libxl has two library functions, libxl_bitmap_and
+ * and libxl_bitmap_or to compute the logical and and or of two bitmaps
+ */
+#define LIBXL_HAVE_BITMAP_AND_OR 1
+
+/*
+ * LIBXL_HAVE_FIRMWARE_PASSTHROUGH indicates the feature for
+ * passing in SMBIOS and ACPI firmware to HVM guests is present
+ * in the library.
+ */
+#define LIBXL_HAVE_FIRMWARE_PASSTHROUGH 1
+
+/*
+ * LIBXL_HAVE_DOMAIN_NODEAFFINITY indicates that a 'nodemap' field
+ * (of libxl_bitmap type) is present in libxl_domain_build_info,
+ * containing the node-affinity for the domain.
+ */
+#define LIBXL_HAVE_DOMAIN_NODEAFFINITY 1
+
+/*
+ * LIBXL_HAVE_PVUSB indicates functions for plugging in USB devices
+ * through pvusb -- both hotplug and at domain creation time.
+ */
+#define LIBXL_HAVE_PVUSB 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_HVM_VENDOR_DEVICE indicates that the
+ * libxl_vendor_device field is present in the hvm sections of
+ * libxl_domain_build_info. This field tells libxl which
+ * flavour of xen-pvdevice to enable in QEMU.
+ */
+#define LIBXL_HAVE_BUILDINFO_HVM_VENDOR_DEVICE 1
+
+/*
+ * The libxl_domain_build_info has the event_channels field.
+ */
+#define LIBXL_HAVE_BUILDINFO_EVENT_CHANNELS 1
+
+/*
+ * libxl_domain_build_info has the u.hvm.ms_vm_genid field.
+ */
+#define LIBXL_HAVE_BUILDINFO_HVM_MS_VM_GENID 1
+
+/*
+ * LIBXL_HAVE_VCPUINFO_SOFT_AFFINITY indicates that a 'cpumap_soft'
+ * field (of libxl_bitmap type) is present in libxl_vcpuinfo,
+ * containing the soft affinity of a vcpu.
+ */
+#define LIBXL_HAVE_VCPUINFO_SOFT_AFFINITY 1
+
+/*
+ * LIBXL_HAVE_SET_VCPUAFFINITY_FORCE indicates that the
+ * libxl_set_vcpuaffinity_force() library call is available.
+ */
+#define LIBXL_HAVE_SET_VCPUAFFINITY_FORCE 1
+
+/*
+ * LIBXL_HAVE_DEVICE_DISK_DIRECT_IO_SAFE indicates that a
+ * 'direct_io_safe' field (of boolean type) is present in
+ * libxl_device_disk.
+ */
+#define LIBXL_HAVE_DEVICE_DISK_DIRECT_IO_SAFE 1
+
+/*
+ * The libxl_device_disk has the discard_enable field.
+ */
+#define LIBXL_HAVE_LIBXL_DEVICE_DISK_DISCARD_ENABLE 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_IOMEM_START_GFN indicates that it is possible
+ * to specify the start guest frame number used to map a range of I/O
+ * memory machine frame numbers via the 'gfn' field (of type uint64)
+ * of the 'iomem' structure. An array of iomem structures is embedded
+ * in libxl_domain_build_info and used to map the indicated memory
+ * ranges during domain build.
+ */
+#define LIBXL_HAVE_BUILDINFO_IOMEM_START_GFN 1
+
+/*
+ * LIBXL_HAVE_SCHED_RTDS indicates that the RTDS real time scheduler
+ * is available. A 'budget' field is added to libxl_domain_sched_params.
+ */
+#define LIBXL_HAVE_SCHED_RTDS 1
+
+/*
+ * LIBXL_HAVE_SCHED_NULL indicates that the 'null' static scheduler
+ * is available.
+ */
+#define LIBXL_HAVE_SCHED_NULL 1
+
+/*
+ * libxl_domain_build_info has u.hvm.viridian_enable and _disable bitmaps
+ * of the specified width.
+ */
+#define LIBXL_HAVE_BUILDINFO_HVM_VIRIDIAN_ENABLE_DISABLE 1
+#define LIBXL_BUILDINFO_HVM_VIRIDIAN_ENABLE_DISABLE_WIDTH 64
+
+/*
+ * libxl_domain_build_info has the u.hvm.mmio_hole_memkb field.
+ */
+#define LIBXL_HAVE_BUILDINFO_HVM_MMIO_HOLE_MEMKB 1
+
+/*
+ * libxl_domain_info returns ERROR_DOMAIN_NOTFOUND if the domain
+ * is not present, instead of ERROR_INVAL.
+ */
+#define LIBXL_HAVE_ERROR_DOMAIN_NOTFOUND 1
+
+/*
+ * libxl_domain_build_info has device_tree and libxl_device_dtdev
+ * exists. This means Device Tree passthrough is supported for ARM
+ */
+#define LIBXL_HAVE_DEVICETREE_PASSTHROUGH 1
+
+/*
+ * libxl_domain_build_info has device_model_user to specify the user to
+ * run the device model with. See docs/misc/qemu-deprivilege.txt.
+ */
+#define LIBXL_HAVE_DEVICE_MODEL_USER 1
+
+/*
+ * libxl_vcpu_sched_params is used to store per-vcpu params.
+ */
+#define LIBXL_HAVE_VCPU_SCHED_PARAMS 1
+
+/*
+ * LIBXL_HAVE_SCHED_RTDS_VCPU_PARAMS indicates RTDS scheduler
+ * now supports per-vcpu settings.
+ */
+#define LIBXL_HAVE_SCHED_RTDS_VCPU_PARAMS 1
+
+/*
+ * LIBXL_HAVE_SCHED_RTDS_VCPU_EXTRA indicates RTDS scheduler
+ * now supports per-vcpu extratime settings.
+ */
+#define LIBXL_HAVE_SCHED_RTDS_VCPU_EXTRA 1
+
+/*
+ * libxl_domain_build_info has the arm.gic_version field.
+ */
+#define LIBXL_HAVE_BUILDINFO_ARM_GIC_VERSION 1
+
+/*
+ * libxl_domain_build_info has the arch_arm.tee field.
+ */
+#define LIBXL_HAVE_BUILDINFO_ARCH_ARM_TEE 1
+
+/*
+ * LIBXL_HAVE_SOFT_RESET indicates that libxl supports performing
+ * 'soft reset' for domains and there is 'soft_reset' shutdown reason
+ * in enum libxl_shutdown_reason.
+ */
+#define LIBXL_HAVE_SOFT_RESET 1
+
+/*
+ * LIBXL_HAVE_APIC_ASSIST indicates that the 'apic_assist' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_APIC_ASSIST 1
+
+/*
+ * LIBXL_HAVE_BUILD_ID means that libxl_version_info has the extra
+ * field for the hypervisor build_id.
+ */
+#define LIBXL_HAVE_BUILD_ID 1
+
+/*
+ * LIBXL_HAVE_QEMU_MONITOR_COMMAND indicates the availability of the
+ * libxl_qemu_monitor_command() function.
+ */
+#define LIBXL_HAVE_QEMU_MONITOR_COMMAND 1
+
+/*
+ * LIBXL_HAVE_SCHED_CREDIT2_PARAMS indicates the existence of a
+ * libxl_sched_credit2_params structure, containing Credit2 scheduler
+ * wide parameters (i.e., the ratelimiting value).
+ */
+#define LIBXL_HAVE_SCHED_CREDIT2_PARAMS 1
+
+/*
+ * LIBXL_HAVE_SCHED_CREDIT_MIGR_DELAY indicates that there is a field
+ * in libxl_sched_credit_params called vcpu_migr_delay_us which controls
+ * the resistance of the vCPUs of the cpupool to migrations among pCPUs.
+ */
+#define LIBXL_HAVE_SCHED_CREDIT_MIGR_DELAY
+
+/*
+ * LIBXL_HAVE_VIRIDIAN_CRASH_CTL indicates that the 'crash_ctl' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_CRASH_CTL 1
+
+/*
+ * LIBXL_HAVE_VIRIDIAN_SYNIC indicates that the 'synic' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_SYNIC 1
+
+/*
+ * LIBXL_HAVE_VIRIDIAN_STIMER indicates that the 'stimer' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_STIMER 1
+
+/*
+ * LIBXL_HAVE_VIRIDIAN_HCALL_IPI indicates that the 'hcall_ipi' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_HCALL_IPI 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE indicates that
+ * libxl_domain_build_info has the u.hvm.acpi_laptop_slate field.
+ */
+#define LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE 1
+
+/*
+ * LIBXL_HAVE_P9S indicates that the p9 field in IDL has been changed to p9s
+ */
+#define LIBXL_HAVE_P9S 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_ARM_VUART indicates that the toolstack supports virtual UART
+ * for ARM.
+ */
+#define LIBXL_HAVE_BUILDINFO_ARM_VUART 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_GRANT_LIMITS indicates that libxl_domain_build_info
+ * has the max_grant_frames and max_maptrack_frames fields.
+ */
+#define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
+
+#define LIBXL_MAX_GRANT_DEFAULT (~(uint32_t)0)
+#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 32 /* deprecated */
+#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 1024 /* deprecated */
+/*
+ * LIBXL_HAVE_BUILDINFO_GRANT_DEFAULT indicates that the default
+ * values of max_grant_frames and max_maptrack_frames fields in
+ * libxl_domain_build_info are the special sentinel value
+ * LIBXL_MAX_GRANT_DEFAULT rather than the fixed values above.
+ * This means to use the hypervisor's default.
+ */
+#define LIBXL_HAVE_BUILDINFO_GRANT_DEFAULT 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_* indicates that libxl_domain_build_info has
+ * the field represented by the '*'. The original position of those
+ * fields is:
+ * - u.hvm.timer_mode
+ * - u.hvm.apic
+ * - u.hvm.nested_hvm
+ * - u.pv.bootloader
+ * - u.pv.bootloader_args
+ */
+#define LIBXL_HAVE_BUILDINFO_TIMER_MODE 1
+#define LIBXL_HAVE_BUILDINFO_APIC 1
+#define LIBXL_HAVE_BUILDINFO_NESTED_HVM 1
+#define LIBXL_HAVE_BUILDINFO_BOOTLOADER 1
+#define LIBXL_HAVE_BUILDINFO_BOOTLOADER_ARGS 1
+
+/*
+ * LIBXL_HAVE_EXTENDED_VKB indicates that libxl_device_vkb has extended fields:
+ * - unique_id;
+ * - feature_disable_keyboard;
+ * - feature_disable_pointer;
+ * - feature_abs_pointer;
+ * - feature_raw_pointer;
+ * - feature_multi_touch;
+ * - width;
+ * - height;
+ * - multi_touch_width;
+ * - multi_touch_height;
+ * - multi_touch_num_contacts.
+ */
+#define LIBXL_HAVE_EXTENDED_VKB 1
+
+/*
+ * LIBXL_HAVE_PHYSINFO_CAP_HAP_SHADOW indicates that libxl_physinfo has
+ * cap_hap and cap_shadow fields reflecting the hardware and Xen availability
+ * of Hardware Assisted, and Shadow paging support.
+ */
+#define LIBXL_HAVE_PHYSINFO_CAP_HAP_SHADOW 1
+
+/*
+ * LIBXL_HAVE_PHYSINFO_CAP_IOMMU_HAP_PT_SHARE indicates that libxl_physinfo
+ * has a cap_iommu_hap_pt_share field that indicates whether the hardware
+ * supports sharing the IOMMU and HAP page tables.
+ */
+#define LIBXL_HAVE_PHYSINFO_CAP_IOMMU_HAP_PT_SHARE 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_IOMMU_MEMKB indicates that libxl_domain_build_info
+ * has an iommu_memkb field which should be set with the amount of memory
+ * overhead needed by the domain for populating IOMMU page tables.
+ */
+#define LIBXL_HAVE_BUILDINFO_IOMMU_MEMKB 1
+
+/*
+ * LIBXL_HAVE_CREATEINFO_PASSTHROUGH indicates that
+ * libxl_domain_create_info has a passthrough field (which is a
+ * libxl_passthrough enumeration) that indicates whether device pass-
+ * through is enabled for the domain and, if so, whether the IOMMU and
+ * HAP page tables may be shared or not.
+ */
+#define LIBXL_HAVE_CREATEINFO_PASSTHROUGH 1
+
+/*
+ * LIBXL_HAVE_DISK_SAFE_REMOVE indicates that the
+ * libxl_device_disk_safe_remove() function is defined.
+ */
+#define LIBXL_HAVE_DISK_SAFE_REMOVE 1
+
+/*
+ * libxl ABI compatibility
+ *
+ * The only guarantee which libxl makes regarding ABI compatibility
+ * across releases is that the SONAME will always be bumped whenever
+ * the ABI is changed in an incompatible way.
+ *
+ * This applies within stable branches as well as
+ * development branches. It is possible that a new stable release of
+ * Xen may require a rebuild of applications using the
+ * library. However per the API compatibility guarantees such a
+ * rebuild should not normally require any source level changes.
+ *
+ * As with the API compatibility the SONAME will only be bumped for the
+ * first ABI incompatible change in a development branch.
+ */
+
+/*
+ * libxl memory management
+ *
+ * From the point of view of the application (ie, libxl's caller),
+ * struct libxl_ctx* is threadsafe, and all returned allocated
+ * structures are obtained from malloc(), and must be freed by the
+ * caller either directly or by calling an appropriate free function
+ * provided by libxl. Ie the application does not get automatic
+ * assistance from libxl in managing these allocations.
+ *
+ * Specific details are in the header comments which should be found
+ * in libxl.h or libxlutil.h, next to the relevant function
+ * declarations.
+ *
+ * Internally, libxl has a garbage collection scheme which allows much libxl
+ * code to allocate strings etc. for internal use without needing to
+ * free them. These are called "temporary allocations".
+ *
+ * The pool for these temporary allocations, along with any other
+ * thread-specific data which is private to libxl but shared between
+ * libxl functions (such as the current xenstore transaction), is
+ * stored in the "gc context" which is a special enhanced context
+ * structure allocated automatically by convenience macros at every
+ * entry to libxl.
+ *
+ * Every libxl function falls into one of these categories:
+ *
+ * 1. Public functions (declared in libxl.h, libxlutil.h), which may
+ * be called by libxl applications. If a public function returns
+ * any allocated object to its caller, that object must have come
+ * from malloc.
+ *
+ * The definitions of public functions MUST use the gc context
+ * initialisation macros (or do the equivalent work themselves).
+ * These macros will ensure that all temporary allocations will be
+ * automatically freed before the function returns to its caller.
+ *
+ * A public function may be called from within libxl; the call
+ * context initialisation macros will make sure that the internal
+ * caller's context is reused (eg, so that the same xenstore
+ * transaction is used). But in-libxl callers of libxl public
+ * functions should note that any libxl public function may cause
+ * recursively reentry into libxl via the application's event
+ * callback hook.
+ *
+ * Public functions have names like libxl_foobar.
+ *
+ * 2. Private functions, which may not be called by libxl
+ * applications; they are not declared in libxl.h or libxlutil.h
+ * and they may not be called other than by other libxl functions.
+ *
+ * Private functions should not use the gc context initialisation
+ * macros.
+ *
+ * Private functions have names like libxl__foobar (NB, two underscores).
+ * Also the declaration of such functions must be preceded by the _hidden
+ * macro.
+ *
+ * Allocations made by a libxl function fall into one of the following
+ * categories (where "object" includes any memory allocation):
+ *
+ * (a) Objects which are not returned to the function's caller.
+ * These should be allocated from the temporary pool.
+ *
+ * (b) Objects which are intended for return to the calling
+ * application. This includes all allocated objects returned by
+ * any public function.
+ *
+ * It may also include objects allocated by an internal function
+ * specifically for eventual return by the function's external
+ * callers, but this situation should be clearly documented in
+ * comments.
+ *
+ * These should be allocated from malloc() et al. and comments
+ * near the function declaration should explain the memory
+ * ownership. If a simple free() by the application is not
+ * sufficient, a suitable public freeing function should be
+ * provided.
+ *
+ * (c) Internal objects whose size and/or lifetime dictate explicit
+ * memory management within libxl. This includes objects which
+ * will be embedded in opaque structures which will be returned to
+ * the libxl caller (more generally, any internal object whose
+ * lifetime exceeds the libxl entrypoint which creates it) and
+ * objects which are so large or numerous that explicit memory
+ * management is required.
+ *
+ * These should be allocated from malloc() et al., and freed
+ * explicitly at the appropriate point. The situation should be
+ * documented in comments.
+ *
+ * (d) Objects which are allocated by internal-only functions and
+ * returned to the function's (therefore, internal) caller but are
+ * strictly for internal use by other parts of libxl. These
+ * should be allocated from the temporary pool.
+ *
+ * Where a function's primary purpose is to return such an object,
+ * it should have a libxl__gc * as its first argument.
+ *
+ * Note that there are two ways to change an allocation from this
+ * category to the "public" category. Either the implementation
+ * is kept internal and a wrapper function duplicates all memory
+ * allocations so that they are suitable for return to external
+ * callers or the implementation uses plain malloc() et al calls
+ * and an internal wrapper adds the relevant pointers to the gc.
+ * The latter method is preferred for obvious performance reasons.
+ *
+ * No temporary objects allocated from the pool may be explicitly freed.
+ * Therefore public functions which initialize a libxl__gc MUST call
+ * libxl__free_all() before returning.
+ *
+ * Memory allocation failures are not handled gracefully. If malloc
+ * (or realloc) fails, libxl will cause the entire process to print
+ * a message to stderr and exit with status 255.
+ */
+/*
+ * libxl types
+ *
+ * Most libxl types are defined by the libxl IDL (see
+ * libxl_types.idl). The library provides a common set of methods for
+ * initialising and freeing these types.
+ *
+ * IDL-generated libxl types should be used as follows: the user must
+ * always call the "init" function before using a type, even if the
+ * variable is simply being passed by reference as an out parameter
+ * to a libxl function. The user must always call "dispose" exactly
+ * once afterwards, to clean up, regardless of whether operations on
+ * this object succeeded or failed. See the xl code for examples.
+ *
+ * "init" and "dispose" are idempotent.
+ *
+ * void libxl_<type>_init(<type> *p):
+ *
+ * Initialises the members of "p" to all defaults. These may either
+ * be a special value which indicates to the library that it should
+ * select an appropriate default when using this field or actual
+ * default values.
+ *
+ * Some fields within a data type (e.g. unions) cannot be sensibly
+ * initialised without further information. In these cases a
+ * separate subfield initialisation function is provided (see
+ * below).
+ *
+ * An instance which has been initialised using this method can
+ * always be safely passed to the dispose function (see
+ * below). This is true even if the data type contains fields which
+ * require a separate call to a subfield initialisation function.
+ *
+ * This method is provided for any aggregate type which is used as
+ * an input parameter.
+ *
+ * void libxl_<type>_init_<subfield>(<type> *p, subfield):
+ *
+ * Initialise those parts of "p" which are not initialised by the
+ * main init function due to the unknown value of "subfield". Sets
+ * p->subfield as well as initialising any fields to their default
+ * values.
+ *
+ * p->subfield must not have been previously initialised.
+ *
+ * This method is provided for any aggregate type.
+ *
+ * void libxl_<type>_dispose(instance *p):
+ *
+ * Frees any dynamically allocated memory used by the members of
+ * "p" but not the storage used by "p" itself (this allows for the
+ * allocation of arrays of types and for the composition of types).
+ *
+ * char *libxl_<type>_to_json(instance *p)
+ *
+ * Generates a JSON object from "p" in the form of a NULL terminated
+ * string.
+ *
+ * <type *> libxl_<type>_from_json(const char *json)
+ * int libxl_<type>_from_json(const char *json)
+ *
+ * Parses "json" and returns:
+ *
+ * an int value, if <type> is an enumeration type. The value is the enum value
+ * representing the respective string in "json".
+ *
+ * an instance of <type>, if <type> is an aggregate type. The returned
+ * instance has its fields filled in by the parser according to "json".
+ *
+ * If the parsing fails, caller cannot rely on the value / instance
+ * returned.
+ */
+#ifndef LIBXL_H
+#define LIBXL_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <sys/wait.h> /* for pid_t */
+
+#include <xentoollog.h>
+
+typedef struct libxl__ctx libxl_ctx;
+
+#include <libxl_uuid.h>
+#include <_libxl_list.h>
+
+/* API compatibility. */
+#ifdef LIBXL_API_VERSION
+#if LIBXL_API_VERSION != 0x040200 && LIBXL_API_VERSION != 0x040300 && \
+ LIBXL_API_VERSION != 0x040400 && LIBXL_API_VERSION != 0x040500 && \
+ LIBXL_API_VERSION != 0x040700 && LIBXL_API_VERSION != 0x040800 && \
+ LIBXL_API_VERSION != 0x041300 && LIBXL_API_VERSION != 0x041400
+#error Unknown LIBXL_API_VERSION
+#endif
+#endif
+
+/* LIBXL_HAVE_RETRIEVE_DOMAIN_CONFIGURATION
+ *
+ * If this is defined we have libxl_retrieve_domain_configuration which
+ * returns the current configuration of a domain, which can be used to
+ * rebuild a domain.
+ */
+#define LIBXL_HAVE_RETRIEVE_DOMAIN_CONFIGURATION 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_VCPU_AFFINITY_ARRAYS
+ *
+ * If this is defined, then the libxl_domain_build_info structure will
+ * contain two arrays of libxl_bitmap-s, with all the necessary information
+ * to set the hard affinity (vcpu_hard_affinity) and the soft affinity
+ * (vcpu_soft_affinity) of the VCPUs.
+ *
+ * Note that, if the vcpu_hard_affinity array is used, libxl will ignore
+ * the content of the cpumap field of libxl_domain_build_info. That is to
+ * say, if the array is allocated and used by the caller, it is it and
+ * only it that determines the hard affinity of the domain's VCPUs.
+ *
+ * The number of libxl_bitmap-s in the arrays should be equal to the
+ * maximum number of VCPUs of the domain. If there only are N elements in
+ * an array, with N smaller than the maximum number of VCPUs, the hard or
+ * soft affinity (depending on which array we are talking about) will be
+ * set only for the first N VCPUs. The other VCPUs will just have affinity,
+ * both hard and soft, with all the host PCPUs.
+ * Each bitmap should be big enough to accommodate the maximum number of
+ * PCPUs of the host.
+ */
+#define LIBXL_HAVE_BUILDINFO_VCPU_AFFINITY_ARRAYS 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_VKB_DEVICE
+ *
+ * If this is defined, then the libxl_domain_build_info structure will
+ * contain a boolean hvm.vkb_device which instructs libxl whether to include
+ * a vkbd at build time or not.
+ */
+#define LIBXL_HAVE_BUILDINFO_VKB_DEVICE 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST
+ *
+ * If this is defined, then the libxl_domain_build_info structure will
+ * contain hvm.usbdevice_list, a libxl_string_list type that contains
+ * a list of USB devices to specify on the qemu command-line.
+ *
+ * If it is set, callers may use either hvm.usbdevice or
+ * hvm.usbdevice_list, but not both; if both are set, libxl will
+ * throw an error.
+ *
+ * If this is not defined, callers can only use hvm.usbdevice. Note
+ * that this means only one device can be added at domain build time.
+ */
+#define LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_USBVERSION
+ *
+ * If this is defined, then the libxl_domain_build_info structure will
+ * contain hvm.usbversion, an integer type that contains a USB
+ * controller version to specify on the qemu upstream command-line.
+ *
+ * If it is set, callers may use hvm.usbversion to specify if the usb
+ * controller is usb1, usb2 or usb3.
+ *
+ * If this is not defined, the hvm.usbversion field does not exist.
+ */
+#define LIBXL_HAVE_BUILDINFO_USBVERSION 1
+
+/*
+ * LIBXL_HAVE_DEVICE_BACKEND_DOMNAME
+ *
+ * If this is defined, libxl_device_* structures containing a backend_domid
+ * field also contain a backend_domname field. If backend_domname is set, it is
+ * resolved to a domain ID when the device is used and takes precedence over the
+ * backend_domid field.
+ *
+ * If this is not defined, the backend_domname field does not exist.
+ */
+#define LIBXL_HAVE_DEVICE_BACKEND_DOMNAME 1
+
+/*
+ * LIBXL_HAVE_NONCONST_EVENT_OCCURS_EVENT_ARG
+ *
+ * This argument was erroneously "const" in the 4.2 release despite
+ * the requirement for the callback to free the event.
+ */
+#if LIBXL_API_VERSION != 0x040200
+#define LIBXL_HAVE_NONCONST_EVENT_OCCURS_EVENT_ARG 1
+#endif
+
+/*
+ * LIBXL_HAVE_NONCONST_LIBXL_BASENAME_RETURN_VALUE
+ *
+ * The return value of libxl_basename is malloc'ed but was erroneously
+ * marked as "const" in releases before 4.5.
+ */
+#if !defined(LIBXL_API_VERSION) || LIBXL_API_VERSION >= 0x040500
+#define LIBXL_HAVE_NONCONST_LIBXL_BASENAME_RETURN_VALUE 1
+#endif
+
+/*
+ * LIBXL_HAVE_PHYSINFO_OUTSTANDING_PAGES
+ *
+ * If this is defined, libxl_physinfo structure will contain a uint64 field
+ * called outstanding_pages, containing the number of pages claimed but not
+ * yet allocated for all domains.
+ */
+#define LIBXL_HAVE_PHYSINFO_OUTSTANDING_PAGES 1
+
+/*
+ * LIBXL_HAVE_PHYSINFO_MAX_POSSIBLE_MFN
+ *
+ * If this is defined, libxl_physinfo structure will contain a uint64 field
+ * called max_possible_mfn, containing the highest possible mfn on this host,
+ * possibly taking memory hotplug into account.
+ */
+#define LIBXL_HAVE_PHYSINFO_MAX_POSSIBLE_MFN 1
+
+/*
+ * LIBXL_HAVE_DOMINFO_OUTSTANDING_MEMKB 1
+ *
+ * If this is defined, libxl_dominfo will contain a MemKB type field called
+ * outstanding_memkb, containing the amount of claimed but not yet allocated
+ * memory for a specific domain.
+ */
+#define LIBXL_HAVE_DOMINFO_OUTSTANDING_MEMKB 1
+
+/*
+ * LIBXL_HAVE_DOMINFO_NEVER_STOP
+ *
+ * If this is defined, libxl_dominfo will contain a flag called never_stop
+ * indicating that the specific domain should never be stopped by the
+ * toolstack.
+ */
+#define LIBXL_HAVE_DOMINFO_NEVER_STOP 1
+
+/*
+ * LIBXL_HAVE_QXL
+ *
+ * If defined, then the libxl_vga_interface_type will contain another value:
+ * "QXL". This value defines whether qxl vga is supported.
+ *
+ * If this is not defined, qxl vga support is absent.
+ */
+#define LIBXL_HAVE_QXL 1
+
+/*
+ * LIBXL_HAVE_SPICE_VDAGENT
+ *
+ * If defined, then the libxl_spice_info structure will contain a boolean type:
+ * vdagent and clipboard_sharing. These values define if Spice vdagent and
+ * clipboard sharing are enabled.
+ *
+ * If this is not defined, the Spice vdagent support is ignored.
+ */
+#define LIBXL_HAVE_SPICE_VDAGENT 1
+
+/*
+ * LIBXL_HAVE_SPICE_USBREDIREDIRECTION (sic -- name kept for compatibility)
+ *
+ * If defined, then the libxl_spice_info structure will contain an integer type
+ * field: usbredirection. This value defines if Spice usbredirection is enabled
+ * and with how many channels.
+ *
+ * If this is not defined, the Spice usbredirection support is ignored.
+ */
+#define LIBXL_HAVE_SPICE_USBREDIREDIRECTION 1
+
+/*
+ * LIBXL_HAVE_SPICE_IMAGECOMPRESSION
+ *
+ * If defined, then the libxl_spice_info structure will contain a string type
+ * field: image_compression. This value defines what Spice image compression
+ * is used.
+ *
+ * If this is not defined, the Spice image compression setting support is ignored.
+ */
+#define LIBXL_HAVE_SPICE_IMAGECOMPRESSION 1
+
+/*
+ * LIBXL_HAVE_SPICE_STREAMINGVIDEO
+ *
+ * If defined, then the libxl_spice_info structure will contain a string type
+ * field: streaming_video. This value defines what Spice streaming video setting
+ * is used.
+ *
+ * If this is not defined, the Spice streaming video setting support is ignored.
+ */
+#define LIBXL_HAVE_SPICE_STREAMINGVIDEO 1
+
+/*
+ * LIBXL_HAVE_HVM_HDTYPE
+ *
+ * If defined, then the u.hvm structure will contain an enum type
+ * hdtype.
+ */
+#define LIBXL_HAVE_HVM_HDTYPE 1
+
+/*
+ * LIBXL_HAVE_DOMAIN_CREATE_RESTORE_PARAMS 1
+ *
+ * If this is defined, libxl_domain_create_restore()'s API has changed to
+ * include a params structure.
+ */
+#define LIBXL_HAVE_DOMAIN_CREATE_RESTORE_PARAMS 1
+
+/*
+ * LIBXL_HAVE_DOMAIN_CREATE_RESTORE_SEND_BACK_FD 1
+ *
+ * If this is defined, libxl_domain_create_restore()'s API includes the
+ * send_back_fd param. This is used only with COLO, for the libxl migration
+ * back channel; other callers should pass -1.
+ */
+#define LIBXL_HAVE_DOMAIN_CREATE_RESTORE_SEND_BACK_FD 1
+
+/*
+ * LIBXL_HAVE_DRIVER_DOMAIN_CREATION 1
+ *
+ * If this is defined, libxl_domain_create_info contains a driver_domain
+ * field that can be used to tell libxl that the domain that is going
+ * to be created is a driver domain, so the necessary actions are taken.
+ */
+#define LIBXL_HAVE_DRIVER_DOMAIN_CREATION 1
+
+/*
+ * LIBXL_HAVE_SIGCHLD_OWNER_SELECTIVE_REAP
+ *
+ * If this is defined:
+ *
+ * Firstly, the enum libxl_sigchld_owner (in libxl_event.h) has the
+ * value libxl_sigchld_owner_libxl_always_selective_reap which may be
+ * passed to libxl_childproc_setmode in hooks->chldmode.
+ *
+ * Secondly, the function libxl_childproc_sigchld_occurred exists.
+ */
+#define LIBXL_HAVE_SIGCHLD_OWNER_SELECTIVE_REAP 1
+
+/*
+ * LIBXL_HAVE_SIGCHLD_SHARING
+ *
+ * If this is defined, it is permissible for multiple libxl ctxs
+ * to simultaneously "own" SIGCHLD. See "Subprocess handling"
+ * in libxl_event.h.
+ */
+#define LIBXL_HAVE_SIGCHLD_SHARING 1
+
+/*
+ * LIBXL_HAVE_NO_SUSPEND_RESUME
+ *
+ * If this is defined then the platform has no support for saving,
+ * restoring or migrating a domain. In this case the related functions
+ * should be expected to return failure. That is:
+ * - libxl_domain_suspend
+ * - libxl_domain_resume
+ * - libxl_domain_remus_start
+ */
+#if defined(__arm__) || defined(__aarch64__)
+#define LIBXL_HAVE_NO_SUSPEND_RESUME 1
+#endif
+
+/*
+ * LIBXL_HAVE_DOMAIN_SUSPEND_ONLY
+ *
+ * If this is defined, function libxl_domains_suspend_only() is available.
+ */
+
+#define LIBXL_HAVE_DOMAIN_SUSPEND_ONLY 1
+
+/*
+ * LIBXL_HAVE_DEVICE_PCI_SEIZE
+ *
+ * If this is defined, then the libxl_device_pci struct will contain
+ * the "seize" boolean field. If this field is set, libxl_pci_add will
+ * check to see if the device is currently assigned to pciback, and if not,
+ * it will attempt to do so (unbinding the device from the existing driver).
+ */
+#define LIBXL_HAVE_DEVICE_PCI_SEIZE 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_KERNEL
+ *
+ * If this is defined, then the libxl_domain_build_info structure will
+ * contain 'kernel', 'ramdisk', 'cmdline' fields. 'kernel' is a string
+ * to indicate kernel image location, 'ramdisk' is a string to indicate
+ * ramdisk location, 'cmdline' is a string to indicate the parameters which
+ * would be appended to kernel image.
+ *
+ * Both PV guest and HVM guest can use these fields for direct kernel boot.
+ * But for compatibility reason, u.pv.kernel, u.pv.ramdisk and u.pv.cmdline
+ * still exist.
+ */
+#define LIBXL_HAVE_BUILDINFO_KERNEL 1
+
+/*
+ * LIBXL_HAVE_DEVICE_CHANNEL
+ *
+ * If this is defined, then the libxl_device_channel struct exists
+ * and channels can be attached to a domain. Channels manifest as consoles
+ * with names, see docs/misc/console.txt.
+ */
+#define LIBXL_HAVE_DEVICE_CHANNEL 1
+
+/*
+ * LIBXL_HAVE_AO_ABORT indicates the availability of libxl_ao_abort
+ */
+#define LIBXL_HAVE_AO_ABORT 1
+
+/* Functions annotated with LIBXL_EXTERNAL_CALLERS_ONLY may not be
+ * called from within libxl itself. Callers outside libxl, who
+ * do not #include libxl_internal.h, are fine. */
+#ifndef LIBXL_EXTERNAL_CALLERS_ONLY
+#define LIBXL_EXTERNAL_CALLERS_ONLY /* disappears for callers outside libxl */
+#endif
+
+/*
+ * LIBXL_HAVE_UUID_COPY_CTX_PARAM
+ *
+ * If this is defined, libxl_uuid_copy has changed to take a libxl_ctx
+ * structure.
+ */
+#define LIBXL_HAVE_UUID_COPY_CTX_PARAM 1
+
+/*
+ * LIBXL_HAVE_SSID_LABEL
+ *
+ * If this is defined, then libxl IDL contains string of XSM security
+ * label in all XSM related structures.
+ *
+ * If set this string takes precedence over the numeric field.
+ */
+#define LIBXL_HAVE_SSID_LABEL 1
+
+/*
+ * LIBXL_HAVE_CPUPOOL_NAME
+ *
+ * If this is defined, then libxl IDL contains string of CPU pool
+ * name in all CPU pool related structures.
+ *
+ * If set this string takes precedence over the numeric field.
+ */
+#define LIBXL_HAVE_CPUPOOL_NAME 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_SERIAL_LIST
+ *
+ * If this is defined, then the libxl_domain_build_info structure will
+ * contain hvm.serial_list, a libxl_string_list type that contains
+ * a list of serial ports to specify on the qemu command-line.
+ *
+ * If it is set, callers may use either hvm.serial or
+ * hvm.serial_list, but not both; if both are set, libxl will
+ * throw an error.
+ *
+ * If this is not defined, callers can only use hvm.serial. Note
+ * that this means only one serial port can be added at domain build time.
+ */
+#define LIBXL_HAVE_BUILDINFO_SERIAL_LIST 1
+
+/*
+ * LIBXL_HAVE_ALTP2M
+ * If this is defined, then libxl supports alternate p2m functionality.
+ */
+#define LIBXL_HAVE_ALTP2M 1
+
+/*
+ * LIBXL_HAVE_REMUS
+ * If this is defined, then libxl supports remus.
+ */
+#define LIBXL_HAVE_REMUS 1
+
+/*
+ * LIBXL_HAVE_COLO_USERSPACE_PROXY
+ * If this is defined, then libxl supports COLO userspace proxy.
+ */
+#define LIBXL_HAVE_COLO_USERSPACE_PROXY 1
+
+typedef uint8_t libxl_mac[6];
+#define LIBXL_MAC_FMT "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx"
+#define LIBXL_MAC_FMTLEN ((2*6)+5) /* 6 hex bytes plus 5 colons */
+#define LIBXL_MAC_BYTES(mac) mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]
+void libxl_mac_copy(libxl_ctx *ctx, libxl_mac *dst, const libxl_mac *src);
+
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * LIBXL_HAVE_PSR_CMT
+ *
+ * If this is defined, the Cache Monitoring Technology feature is supported.
+ */
+#define LIBXL_HAVE_PSR_CMT 1
+
+/*
+ * LIBXL_HAVE_PSR_MBM
+ *
+ * If this is defined, the Memory Bandwidth Monitoring feature is supported.
+ */
+#define LIBXL_HAVE_PSR_MBM 1
+
+/*
+ * LIBXL_HAVE_PSR_CAT
+ *
+ * If this is defined, the Cache Allocation Technology feature is supported.
+ */
+#define LIBXL_HAVE_PSR_CAT 1
+
+/*
+ * LIBXL_HAVE_PSR_CDP
+ *
+ * If this is defined, the Code and Data Prioritization feature is supported.
+ */
+#define LIBXL_HAVE_PSR_CDP 1
+
+/*
+ * LIBXL_HAVE_PSR_L2_CAT
+ *
+ * If this is defined, the L2 Cache Allocation Technology feature is supported.
+ */
+#define LIBXL_HAVE_PSR_L2_CAT 1
+
+/*
+ * LIBXL_HAVE_PSR_GENERIC
+ *
+ * If this is defined, the Memory Bandwidth Allocation feature is supported.
+ * The following public functions are available:
+ * libxl_psr_{set/get}_val
+ * libxl_psr_get_hw_info
+ * libxl_psr_hw_info_list_free
+ */
+#define LIBXL_HAVE_PSR_GENERIC 1
+
+/*
+ * LIBXL_HAVE_MCA_CAPS
+ *
+ * If this is defined, setting MCA capabilities for HVM domain is supported.
+ */
+#define LIBXL_HAVE_MCA_CAPS 1
+#endif
+
+/*
+ * LIBXL_HAVE_PCITOPOLOGY
+ *
+ * If this is defined, then interface to query hypervisor about PCI device
+ * topology is available.
+ */
+#define LIBXL_HAVE_PCITOPOLOGY 1
+
+/*
+ * LIBXL_HAVE_SOCKET_BITMAP
+ *
+ * If this is defined, then libxl_socket_bitmap_alloc and
+ * libxl_get_online_socketmap exist.
+ */
+#define LIBXL_HAVE_SOCKET_BITMAP 1
+
+/*
+ * LIBXL_HAVE_SRM_V2
+ *
+ * If this is defined, then the libxl_domain_create_restore() interface takes
+ * a "stream_version" parameter and supports a value of 2.
+ *
+ * libxl_domain_suspend() will produce a v2 stream.
+ */
+#define LIBXL_HAVE_SRM_V2 1
+
+/*
+ * LIBXL_HAVE_SRM_V1
+ *
+ * In the case that LIBXL_HAVE_SRM_V2 is set, LIBXL_HAVE_SRM_V1
+ * indicates that libxl_domain_create_restore() can handle a "stream_version"
+ * parameter of 1, and convert the stream format automatically.
+ */
+#define LIBXL_HAVE_SRM_V1 1
+
+/*
+ * libxl_domain_build_info has the u.hvm.gfx_passthru_kind field and
+ * the libxl_gfx_passthru_kind enumeration is defined.
+ */
+#define LIBXL_HAVE_GFX_PASSTHRU_KIND
+
+/*
+ * LIBXL_HAVE_CHECKPOINTED_STREAM
+ *
+ * If this is defined, then libxl_checkpointed_stream exists.
+ */
+#define LIBXL_HAVE_CHECKPOINTED_STREAM 1
+
+/*
+ * LIBXL_HAVE_BUILDINFO_HVM_SYSTEM_FIRMWARE
+ *
+ * libxl_domain_build_info has u.hvm.system_firmware field which can be used
+ * to provide a different firmware blob (like SeaBIOS or OVMF).
+ */
+#define LIBXL_HAVE_BUILDINFO_HVM_SYSTEM_FIRMWARE
+
+/*
+ * ERROR_REMUS_XXX error code only exists from Xen 4.5, Xen 4.6 and it
+ * is changed to ERROR_CHECKPOINT_XXX in Xen 4.7
+ */
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION >= 0x040500 \
+ && LIBXL_API_VERSION < 0x040700
+#define ERROR_REMUS_DEVOPS_DOES_NOT_MATCH \
+ ERROR_CHECKPOINT_DEVOPS_DOES_NOT_MATCH
+#define ERROR_REMUS_DEVICE_NOT_SUPPORTED \
+ ERROR_CHECKPOINT_DEVICE_NOT_SUPPORTED
+#endif
+
+/*
+ * LIBXL_HAVE_VGA_INTERFACE_TYPE_UNKNOWN
+ *
+ * In the case that LIBXL_HAVE_VGA_INTERFACE_TYPE_UNKNOWN is set the
+ * libxl_vga_interface_type enumeration type contains a
+ * LIBXL_VGA_INTERFACE_TYPE_UNKNOWN identifier. This is used to signal
+ * that a libxl_vga_interface_type type has not been initialized yet.
+ */
+#define LIBXL_HAVE_VGA_INTERFACE_TYPE_UNKNOWN 1
+
+/*
+ * LIBXL_HAVE_BYTEARRAY_UUID
+ *
+ * If this is defined, the internal member of libxl_uuid is defined
+ * as a 16 byte array that contains the UUID in big endian format.
+ * Also, the same structure layout is used across all OSes.
+ */
+#define LIBXL_HAVE_BYTEARRAY_UUID 1
+
+/*
+ * LIBXL_HAVE_MEMKB_64BITS
+ *
+ * If this is defined libxl_set_memory_target(), libxl_domain_setmaxmem()
+ * and libxl_wait_for_free_memory() will take a 64 bit value for the memory
+ * size parameter.
+ * From Xen 4.8 on libxl_get_memory_target(), libxl_domain_need_memory() and
+ * libxl_get_free_memory() return the memory size in a 64 bit value, too.
+ */
+#define LIBXL_HAVE_MEMKB_64BITS 1
+
+/*
+ * LIBXL_HAVE_QED
+ *
+ * If this is defined QED disk formats can be used for both HVM and PV guests.
+ */
+#define LIBXL_HAVE_QED 1
+
+/*
+ * LIBXL_HAVE_SET_PARAMETERS
+ *
+ * If this is defined setting hypervisor parameters is supported.
+ */
+#define LIBXL_HAVE_SET_PARAMETERS 1
+
+/*
+ * LIBXL_HAVE_PV_SHIM
+ *
+ * If this is defined, libxl_domain_build_info's pvh type information
+ * contains members pvshim, pvshim_path, pvshim_cmdline, pvshim_extra.
+ */
+#define LIBXL_HAVE_PV_SHIM 1
+
+/*
+ * LIBXL_HAVE_PVCALLS
+ *
+ * If this is defined, libxl supports creating pvcalls interfaces.
+ */
+#define LIBXL_HAVE_PVCALLS 1
+
+/*
+ * LIBXL_HAVE_FN_USING_QMP_ASYNC
+ *
+ * This define indicates that some function's API has changed and have an
+ * extra parameter "ao_how" which means that the function can be executed
+ * asynchronously. Those functions are:
+ * libxl_domain_pause()
+ * libxl_domain_unpause()
+ * libxl_send_trigger()
+ * libxl_set_vcpuonline()
+ * libxl_retrieve_domain_configuration()
+ * libxl_qemu_monitor_command()
+ * libxl_domain_shutdown()
+ * libxl_domain_reboot()
+ */
+#define LIBXL_HAVE_FN_USING_QMP_ASYNC 1
+
+/*
+ * LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONFIG
+ *
+ * If this is set, libxl_domain_need_memory takes a
+ * libxl_domain_config* (non-const) and uint32_t domid_for_logging
+ * (instead of a const libxl_domain_build_info*).
+ *
+ * If this is set, there is no need to call
+ * libxl_get_required_shadow_memory and instead the caller should
+ * simply leave shadow_memkb set to LIBXL_MEMKB_DEFAULT and allow
+ * libxl to fill in a suitable default in the usual way.
+ */
+#define LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONFIG
+
+/*
+ * LIBXL_HAVE_CREATEINFO_DOMID
+ *
+ * libxl_domain_create_new() and libxl_domain_create_restore() will use
+ * a domid specified in libxl_domain_create_info.
+ */
+#define LIBXL_HAVE_CREATEINFO_DOMID
+
+/*
+ * LIBXL_HAVE_CREATEINFO_XEND_SUSPEND_EVTCHN_COMPAT
+ *
+ * libxl_domain_create_info contains a boolean 'xend_suspend_evtchn_compat'
+ * value to control creation of the xenstore path for a domain's suspend
+ * event channel.
+ */
+#define LIBXL_HAVE_CREATEINFO_XEND_SUSPEND_EVTCHN_COMPAT
+
+typedef char **libxl_string_list;
+void libxl_string_list_dispose(libxl_string_list *sl);
+int libxl_string_list_length(const libxl_string_list *sl);
+void libxl_string_list_copy(libxl_ctx *ctx, libxl_string_list *dst,
+ const libxl_string_list *src);
+
+typedef char **libxl_key_value_list;
+void libxl_key_value_list_dispose(libxl_key_value_list *kvl);
+int libxl_key_value_list_length(const libxl_key_value_list *kvl);
+void libxl_key_value_list_copy(libxl_ctx *ctx,
+ libxl_key_value_list *dst,
+ const libxl_key_value_list *src);
+
+typedef uint32_t libxl_hwcap[8];
+void libxl_hwcap_copy(libxl_ctx *ctx, libxl_hwcap *dst, const libxl_hwcap *src);
+
+typedef uint64_t libxl_ev_user;
+
+typedef struct {
+ uint32_t size; /* number of bytes in map */
+ uint8_t *map;
+} libxl_bitmap;
+void libxl_bitmap_init(libxl_bitmap *map);
+void libxl_bitmap_dispose(libxl_bitmap *map);
+
+/*
+ * libxl_cpuid_policy is opaque in the libxl ABI. Users of both libxl and
+ * libxc may not make assumptions about xc_xend_cpuid.
+ */
+typedef struct xc_xend_cpuid libxl_cpuid_policy;
+typedef libxl_cpuid_policy * libxl_cpuid_policy_list;
+void libxl_cpuid_dispose(libxl_cpuid_policy_list *cpuid_list);
+int libxl_cpuid_policy_list_length(const libxl_cpuid_policy_list *l);
+void libxl_cpuid_policy_list_copy(libxl_ctx *ctx,
+ libxl_cpuid_policy_list *dst,
+ const libxl_cpuid_policy_list *src);
+
+#define LIBXL_PCI_FUNC_ALL (~0U)
+
+typedef uint32_t libxl_domid;
+typedef int libxl_devid;
+
+/*
+ * Formatting Enumerations.
+ *
+ * Each enumeration type libxl_E declares an associated lookup table
+ * libxl_E_string_table and a lookup function libxl_E_from_string.
+ */
+typedef struct {
+ const char *s;
+ int v;
+} libxl_enum_string_table;
+
+struct libxl_event;
+typedef LIBXL_TAILQ_ENTRY(struct libxl_event) libxl_ev_link;
+
+/*
+ * A boolean variable with an explicit default state.
+ *
+ * Users should treat this struct as opaque and use the following
+ * defined macros and accessor functions.
+ *
+ * To allow users of the library to naively select all defaults this
+ * state is represented as 0. False is < 0 and True is > 0.
+ */
+typedef struct {
+ int val;
+} libxl_defbool;
+
+void libxl_defbool_set(libxl_defbool *db, bool b);
+/* Resets to default */
+void libxl_defbool_unset(libxl_defbool *db);
+/* Sets db only if it is currently == default */
+void libxl_defbool_setdefault(libxl_defbool *db, bool b);
+bool libxl_defbool_is_default(libxl_defbool db);
+/* db must not be == default */
+bool libxl_defbool_val(libxl_defbool db);
+
+const char *libxl_defbool_to_string(libxl_defbool b);
+
+#define LIBXL_TIMER_MODE_DEFAULT -1
+#define LIBXL_MEMKB_DEFAULT ~0ULL
+
+/*
+ * We'd like to set a memory boundary to determine if we need to check
+ * any overlap with reserved device memory.
+ */
+#define LIBXL_RDM_MEM_BOUNDARY_MEMKB_DEFAULT (2048 * 1024)
+
+#define LIBXL_MS_VM_GENID_LEN 16
+typedef struct {
+ uint8_t bytes[LIBXL_MS_VM_GENID_LEN];
+} libxl_ms_vm_genid;
+
+#include "_libxl_types.h"
+
+const libxl_version_info* libxl_get_version_info(libxl_ctx *ctx);
+
+/*
+ * Some libxl operations can take a long time. These functions take a
+ * parameter to control their concurrency:
+ * libxl_asyncop_how *ao_how
+ *
+ * If ao_how==NULL, the function will be synchronous.
+ *
+ * If ao_how!=NULL, the function will set the operation going, and if
+ * this is successful will return 0. In this case the zero error
+ * response does NOT mean that the operation was successful; it just
+ * means that it has been successfully started. It will finish later,
+ * perhaps with an error.
+ *
+ * If ao_how->callback!=NULL, the callback will be called when the
+ * operation completes. The same rules as for libxl_event_hooks
+ * apply, including the reentrancy rules and the possibility of
+ * "disaster", except that libxl calls ao_how->callback instead of
+ * libxl_event_hooks.event_occurs. (See libxl_event.h.)
+ *
+ * If ao_how->callback==NULL, a libxl_event will be generated which
+ * can be obtained from libxl_event_wait or libxl_event_check. The
+ * event will have type OPERATION_COMPLETE (which is not used
+ * elsewhere).
+ *
+ * Note that it is possible for an asynchronous operation which is to
+ * result in a callback to complete during its initiating function
+ * call. In this case the initiating function will return 0
+ * indicating that the operation is "in progress", even though by
+ * the time it returns the operation is complete and the callback has
+ * already happened.
+ *
+ * The application must set and use ao_how->for_event (which will be
+ * copied into libxl_event.for_user) or ao_how->for_callback (passed
+ * to the callback) to determine which operation finished, and it must
+ * of course check the rc value for errors.
+ *
+ * *ao_how does not need to remain valid after the initiating function
+ * returns. All other parameters must remain valid for the lifetime of
+ * the asynchronous operation, unless otherwise specified.
+ *
+ * Callbacks may occur on any thread in which the application calls
+ * libxl.
+ */
+
+typedef struct {
+ void (*callback)(libxl_ctx *ctx, int rc, void *for_callback);
+ union {
+ libxl_ev_user for_event; /* used if callback==NULL */
+ void *for_callback; /* passed to callback */
+ } u;
+} libxl_asyncop_how;
+
+/*
+ * Some more complex asynchronous operations can report intermediate
+ * progress. How this is to be reported is controlled, for each
+ * function, by a parameter
+ * libxl_asyncprogress_how *aop_FOO_how;
+ * for each kind of progress FOO supported by that function. Each
+ * such kind of progress is associated with an event type.
+ *
+ * The function description will document whether, when, and how
+ * many times, the intermediate progress will be reported, and
+ * what the corresponding event type(s) are.
+ *
+ * If aop_FOO_how==NULL, intermediate progress reports are discarded.
+ *
+ * If aop_FOO_how->callback==NULL, intermediate progress reports
+ * generate libxl events which can be obtained from libxl_event_wait
+ * or libxl_event_check.
+ *
+ * If aop_FOO_how->callback!=NULL, libxl will report intermediate
+ * progress by calling callback(ctx, &event, for_callback).
+ *
+ * The rules for these events are otherwise the same as those for
+ * ordinary events. The reentrancy and threading rules for the
+ * callback are the same as those for ao completion callbacks.
+ *
+ * Note that the callback, if provided, is responsible for freeing
+ * the event.
+ *
+ * If callbacks are requested, they will be made, and returned, before
+ * the long-running libxl operation is considered finished (so if the
+ * long-running libxl operation was invoked with ao_how==NULL then any
+ * callbacks will occur strictly before the long-running operation
+ * returns). However, the callbacks may occur on any thread.
+ *
+ * In general, otherwise, no promises are made about the relative
+ * order of callbacks in a multithreaded program. In particular
+ * different callbacks relating to the same long-running operation may
+ * be delivered out of order.
+ */
+
+typedef struct {
+ void (*callback)(libxl_ctx *ctx, libxl_event*, void *for_callback);
+ libxl_ev_user for_event; /* always used */
+ void *for_callback; /* passed to callback */
+} libxl_asyncprogress_how;
+
+/*
+ * It is sometimes possible to abort an asynchronous operation.
+ *
+ * libxl_ao_abort searches for an ongoing asynchronous operation whose
+ * ao_how is identical to *how, and tries to abort it. The return
+ * values from libxl_ao_abort are as follows:
+ *
+ * 0
+ *
+ * The operation was found, and attempts are being made to cut it
+ * short. However, it may still take some time to stop. It is
+ * also possible that the operation will nevertheless complete
+ * successfully.
+ *
+ * ERROR_NOTFOUND
+ *
+ * No matching ongoing operation was found. This might happen
+ * for an actual operation if the operation has already completed
+ * (perhaps on another thread). The call to libxl_ao_abort has
+ * had no effect.
+ *
+ * ERROR_ABORTED
+ *
+ * The operation has already been the subject of at least one
+ * call to libxl_ao_abort.
+ *
+ * If the operation was indeed cut short due to the abort request, it
+ * will complete, at some point in the future, with ERROR_ABORTED. In
+ * that case, depending on the operation it may have performed some of the
+ * work in question and left the operation half-done. Consult the
+ * documentation for individual operations.
+ *
+ * Note that an aborted operation might still fail for other reasons
+ * even after the abort was requested.
+ *
+ * If your application is multithreaded you must not reuse an
+ * ao_how->for_event or ao_how->for_callback value (with a particular
+ * ao_how->callback) unless you are sure that none of your other
+ * threads are going to abort the previous operation using that
+ * value; otherwise you risk aborting the wrong operation if the
+ * intended target of the abort request completes in the meantime.
+ *
+ * It is possible to abort even an operation which is being performed
+ * synchronously, but since in that case how==NULL you had better only
+ * have one such operation, because it is not possible to tell them
+ * apart (and libxl_ao_abort will abort only the first one it finds).
+ * (And, if you want to do this, obviously the abort would have to be
+ * requested on a different thread.)
+ */
+int libxl_ao_abort(libxl_ctx *ctx, const libxl_asyncop_how *how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+
+#define LIBXL_VERSION 0
+
+/* context functions */
+int libxl_ctx_alloc(libxl_ctx **pctx, int version,
+ unsigned flags /* none currently defined */,
+ xentoollog_logger *lg);
+int libxl_ctx_free(libxl_ctx *ctx /* 0 is OK */);
+
+/* domain related functions */
+
+#define INVALID_DOMID ~0
+#define RANDOM_DOMID (INVALID_DOMID - 1)
+
+/* If the result is ERROR_ABORTED, the domain may or may not exist
+ * (in a half-created state). *domid will be valid and will be the
+ * domain id, or INVALID_DOMID, as appropriate */
+
+int libxl_domain_create_new(libxl_ctx *ctx, libxl_domain_config *d_config,
+ uint32_t *domid,
+ const libxl_asyncop_how *ao_how,
+ const libxl_asyncprogress_how *aop_console_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_domain_create_restore(libxl_ctx *ctx, libxl_domain_config *d_config,
+ uint32_t *domid, int restore_fd,
+ int send_back_fd,
+ const libxl_domain_restore_params *params,
+ const libxl_asyncop_how *ao_how,
+ const libxl_asyncprogress_how *aop_console_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040400
+
+static inline int libxl_domain_create_restore_0x040200(
+ libxl_ctx *ctx, libxl_domain_config *d_config,
+ uint32_t *domid, int restore_fd,
+ const libxl_asyncop_how *ao_how,
+ const libxl_asyncprogress_how *aop_console_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY
+{
+ libxl_domain_restore_params params;
+ int ret;
+
+ libxl_domain_restore_params_init(¶ms);
+
+ ret = libxl_domain_create_restore(
+ ctx, d_config, domid, restore_fd, -1, ¶ms, ao_how, aop_console_how);
+
+ libxl_domain_restore_params_dispose(¶ms);
+ return ret;
+}
+
+#define libxl_domain_create_restore libxl_domain_create_restore_0x040200
+
+#elif defined(LIBXL_API_VERSION) && LIBXL_API_VERSION >= 0x040400 \
+ && LIBXL_API_VERSION < 0x040700
+
+static inline int libxl_domain_create_restore_0x040400(
+ libxl_ctx *ctx, libxl_domain_config *d_config,
+ uint32_t *domid, int restore_fd,
+ const libxl_domain_restore_params *params,
+ const libxl_asyncop_how *ao_how,
+ const libxl_asyncprogress_how *aop_console_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY
+{
+ return libxl_domain_create_restore(ctx, d_config, domid, restore_fd,
+ -1, params, ao_how, aop_console_how);
+}
+
+#define libxl_domain_create_restore libxl_domain_create_restore_0x040400
+
+#endif
+
+int libxl_domain_soft_reset(libxl_ctx *ctx,
+ libxl_domain_config *d_config,
+ uint32_t domid,
+ const libxl_asyncop_how *ao_how,
+ const libxl_asyncprogress_how
+ *aop_console_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+ /* A progress report will be made via ao_console_how, of type
+ * domain_create_console_available, when the domain's primary
+ * console is available and can be connected to.
+ */
+
+void libxl_domain_config_init(libxl_domain_config *d_config);
+void libxl_domain_config_dispose(libxl_domain_config *d_config);
+
+/*
+ * Retrieve the domain configuration and fill it into d_config. The
+ * returned configuration can be used to rebuild a domain. It only
+ * works with DomU.
+ */
+int libxl_retrieve_domain_configuration(libxl_ctx *ctx, uint32_t domid,
+ libxl_domain_config *d_config,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
+static inline int libxl_retrieve_domain_configuration_0x041200(
+ libxl_ctx *ctx, uint32_t domid, libxl_domain_config *d_config)
+{
+ return libxl_retrieve_domain_configuration(ctx, domid, d_config, NULL);
+}
+#define libxl_retrieve_domain_configuration \
+ libxl_retrieve_domain_configuration_0x041200
+#endif
+
+int libxl_domain_suspend(libxl_ctx *ctx, uint32_t domid, int fd,
+ int flags, /* LIBXL_SUSPEND_* */
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+#define LIBXL_SUSPEND_DEBUG 1
+#define LIBXL_SUSPEND_LIVE 2
+
+/*
+ * Only suspend domain, do not save its state to file, do not destroy it.
+ * Suspended domain can be resumed with libxl_domain_resume()
+ */
+int libxl_domain_suspend_only(libxl_ctx *ctx, uint32_t domid,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* @param suspend_cancel [from xenctrl.h:xc_domain_resume( @param fast )]
+ * If this parameter is true, use co-operative resume. The guest
+ * must support this.
+ */
+int libxl_domain_resume(libxl_ctx *ctx, uint32_t domid, int suspend_cancel,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/*
+ * This function doesn't return unless something has gone wrong with
+ * the replication to the secondary. If this function returns then the
+ * caller should resume the (primary) domain.
+ */
+int libxl_domain_remus_start(libxl_ctx *ctx, libxl_domain_remus_info *info,
+ uint32_t domid, int send_fd, int recv_fd,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+int libxl_domain_shutdown(libxl_ctx *ctx, uint32_t domid,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_domain_reboot(libxl_ctx *ctx, uint32_t domid,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
+static inline int libxl_domain_shutdown_0x041200(libxl_ctx *ctx,
+ uint32_t domid)
+{
+ return libxl_domain_shutdown(ctx, domid, NULL);
+}
+#define libxl_domain_shutdown libxl_domain_shutdown_0x041200
+static inline int libxl_domain_reboot_0x041200(libxl_ctx *ctx,
+ uint32_t domid)
+{
+ return libxl_domain_reboot(ctx, domid, NULL);
+}
+#define libxl_domain_reboot libxl_domain_reboot_0x041200
+#endif
+
+int libxl_domain_destroy(libxl_ctx *ctx, uint32_t domid,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_domain_preserve(libxl_ctx *ctx, uint32_t domid, libxl_domain_create_info *info, const char *name_suffix, libxl_uuid new_uuid);
+
+/* get max. number of cpus supported by hypervisor */
+int libxl_get_max_cpus(libxl_ctx *ctx);
+
+/* get the actual number of currently online cpus on the host */
+int libxl_get_online_cpus(libxl_ctx *ctx);
+ /* Beware that no locking or serialization is provided by libxl,
+ * so the information can be outdated as soon as the function
+ * returns. If there are other entities in the system capable
+ * of onlining/offlining CPUs, it is up to the application
+ * to guarantee consistency, if that is important. */
+
+/* get max. number of NUMA nodes supported by hypervisor */
+int libxl_get_max_nodes(libxl_ctx *ctx);
+
+int libxl_domain_rename(libxl_ctx *ctx, uint32_t domid,
+ const char *old_name, const char *new_name);
+
+ /* if old_name is NULL, any old name is OK; otherwise we check
+ * transactionally that the domain has the old name; if
+ * trans is not 0 we use caller's transaction and caller must do retries */
+
+int libxl_domain_pause(libxl_ctx *ctx, uint32_t domid,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_domain_unpause(libxl_ctx *ctx, uint32_t domid,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
+static inline int libxl_domain_pause_0x041200(
+ libxl_ctx *ctx, uint32_t domid)
+{
+ return libxl_domain_pause(ctx, domid, NULL);
+}
+static inline int libxl_domain_unpause_0x041200(
+ libxl_ctx *ctx, uint32_t domid)
+{
+ return libxl_domain_unpause(ctx, domid, NULL);
+}
+#define libxl_domain_pause libxl_domain_pause_0x041200
+#define libxl_domain_unpause libxl_domain_unpause_0x041200
+#endif
+
+
+int libxl_domain_core_dump(libxl_ctx *ctx, uint32_t domid,
+ const char *filename,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+int libxl_domain_setmaxmem(libxl_ctx *ctx, uint32_t domid, uint64_t target_memkb);
+int libxl_set_memory_target(libxl_ctx *ctx, uint32_t domid, int64_t target_memkb, int relative, int enforce);
+int libxl_get_memory_target(libxl_ctx *ctx, uint32_t domid, uint64_t *out_target);
+int libxl_get_memory_target_0x040700(libxl_ctx *ctx, uint32_t domid,
+ uint32_t *out_target)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/*
+ * WARNING
+ * This memory management API is unstable even in Xen 4.2.
+ * It has a number of deficiencies and we intend to replace it.
+ *
+ * The semantics of these functions should not be relied on to be very
+ * coherent or stable. We will however endeavour to keep working
+ * existing programs which use them in roughly the same way as libxl.
+ */
+/* how much free memory in the system a domain needs to be built */
+int libxl_domain_need_memory(libxl_ctx *ctx,
+ libxl_domain_config *config
+ /* ^ will be partially defaulted */,
+ uint32_t domid_for_logging /* INVALID_DOMID ok */,
+ uint64_t *need_memkb);
+int libxl_domain_need_memory_0x041200(libxl_ctx *ctx,
+ const libxl_domain_build_info *b_info_in,
+ uint64_t *need_memkb);
+int libxl_domain_need_memory_0x040700(libxl_ctx *ctx,
+ const libxl_domain_build_info *b_info_in,
+ uint32_t *need_memkb)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+/* how much free memory is available in the system */
+int libxl_get_free_memory(libxl_ctx *ctx, uint64_t *memkb);
+int libxl_get_free_memory_0x040700(libxl_ctx *ctx, uint32_t *memkb)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+/* wait for a given amount of memory to be free in the system */
+int libxl_wait_for_free_memory(libxl_ctx *ctx, uint32_t domid, uint64_t memory_kb, int wait_secs);
+/*
+ * Wait for the memory target of a domain to be reached. Does not
+ * decrement wait_secs if the domain is making progress toward reaching
+ * the target. If the domain is not making progress, wait_secs is
+ * decremented. If the timeout expires before the target is reached, the
+ * function returns ERROR_FAIL.
+ *
+ * Older versions of this function (Xen 4.5 and older), decremented
+ * wait_secs even if the domain was making progress, resulting in far
+ * lower overall wait times. To make sure that your calling routine
+ * works with new and old implementations of the function, pass enough
+ * time for the guest to reach its target as an argument.
+ */
+int libxl_wait_for_memory_target(libxl_ctx *ctx, uint32_t domid, int wait_secs);
+
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040800
+#define libxl_get_memory_target libxl_get_memory_target_0x040700
+#define libxl_domain_need_memory libxl_domain_need_memory_0x040700
+#define libxl_get_free_memory libxl_get_free_memory_0x040700
+#elif defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
+#define libxl_domain_need_memory libxl_domain_need_memory_0x041200
+#endif
+
+int libxl_vncviewer_exec(libxl_ctx *ctx, uint32_t domid, int autopass);
+
+/*
+ * If notify_fd is not -1, xenconsole will write 0x00 to it to notify
+ * the caller that it has connected to the guest console.
+ */
+int libxl_console_exec(libxl_ctx *ctx, uint32_t domid, int cons_num,
+ libxl_console_type type, int notify_fd);
+/* libxl_primary_console_exec finds the domid and console number
+ * corresponding to the primary console of the given vm, then calls
+ * libxl_console_exec with the right arguments (domid might be different
+ * if the guest is using stubdoms).
+ * This function can be called after creating the device model, in
+ * case of HVM guests, and before libxl_run_bootloader in case of PV
+ * guests using pygrub.
+ * If notify_fd is not -1, xenconsole will write 0x00 to it to notify
+ * the caller that it has connected to the guest console.
+ */
+int libxl_primary_console_exec(libxl_ctx *ctx, uint32_t domid_vm,
+ int notify_fd);
+
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040800
+
+static inline int libxl_console_exec_0x040700(libxl_ctx *ctx,
+ uint32_t domid, int cons_num,
+ libxl_console_type type)
+{
+ return libxl_console_exec(ctx, domid, cons_num, type, -1);
+}
+#define libxl_console_exec libxl_console_exec_0x040700
+
+static inline int libxl_primary_console_exec_0x040700(libxl_ctx *ctx,
+ uint32_t domid_vm)
+{
+ return libxl_primary_console_exec(ctx, domid_vm, -1);
+}
+#define libxl_primary_console_exec libxl_primary_console_exec_0x040700
+
+#endif
+
+/* libxl_console_get_tty retrieves the specified domain's console tty path
+ * and stores it in path. Caller is responsible for freeing the memory.
+ */
+int libxl_console_get_tty(libxl_ctx *ctx, uint32_t domid, int cons_num,
+ libxl_console_type type, char **path);
+
+/* libxl_primary_console_get_tty retrieves the specified domain's primary
+ * console tty path and stores it in path. Caller is responsible for freeing
+ * the memory.
+ */
+int libxl_primary_console_get_tty(libxl_ctx *ctx, uint32_t domid_vm, char **path);
+
+/* May be called with info_r == NULL to check for domain's existence.
+ * Returns ERROR_DOMAIN_NOTFOUND if domain does not exist (used to return
+ * ERROR_INVAL for this scenario). */
+int libxl_domain_info(libxl_ctx*, libxl_dominfo *info_r,
+ uint32_t domid);
+
+/* These functions each return (on success) an array of elements,
+ * and the length via the int* out parameter. These arrays and
+ * their contents come from malloc, and must be freed with the
+ * corresponding libxl_THING_list_free function.
+ */
+libxl_dominfo * libxl_list_domain(libxl_ctx*, int *nb_domain_out);
+void libxl_dominfo_list_free(libxl_dominfo *list, int nb_domain);
+
+libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx*, int *nb_pool_out);
+void libxl_cpupoolinfo_list_free(libxl_cpupoolinfo *list, int nb_pool);
+
+libxl_vminfo * libxl_list_vm(libxl_ctx *ctx, int *nb_vm_out);
+void libxl_vminfo_list_free(libxl_vminfo *list, int nb_vm);
+
+#define LIBXL_CPUTOPOLOGY_INVALID_ENTRY (~(uint32_t)0)
+libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out);
+void libxl_cputopology_list_free(libxl_cputopology *, int nb_cpu);
+
+#define LIBXL_PCITOPOLOGY_INVALID_ENTRY (~(uint32_t)0)
+libxl_pcitopology *libxl_get_pci_topology(libxl_ctx *ctx, int *num_devs);
+void libxl_pcitopology_list_free(libxl_pcitopology *, int num_devs);
+
+#define LIBXL_NUMAINFO_INVALID_ENTRY (~(uint32_t)0)
+libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr);
+void libxl_numainfo_list_free(libxl_numainfo *, int nr);
+
+libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
+ int *nb_vcpu, int *nr_cpus_out);
+void libxl_vcpuinfo_list_free(libxl_vcpuinfo *, int nr_vcpus);
+
+/*
+ * Devices
+ * =======
+ *
+ * Each device is represented by a libxl_device_<TYPE> data structure
+ * which is defined via the IDL. In addition some devices have an
+ * additional data type libxl_device_<TYPE>_getinfo which contains
+ * further runtime information about the device.
+ *
+ * In addition to the general methods available for libxl types (see
+ * "libxl types" above) a common set of methods are available for each
+ * device type. These are described below.
+ *
+ * Querying
+ * --------
+ *
+ * libxl_device_<type>_list(ctx, domid, nr):
+ *
+ * Returns an array of libxl_device_<type> length nr representing
+ * the devices attached to the specified domain.
+ *
+ * libxl_device_<type>_getinfo(ctx, domid, device, info):
+ *
+ * Initialises info with details of the given device which must be
+ * attached to the specified domain.
+ *
+ * Creation / Control
+ * ------------------
+ *
+ * libxl_device_<type>_add(ctx, domid, device):
+ *
+ * Adds the given device to the specified domain. This can be called
+ * while the guest is running (hotplug) or before boot (coldplug).
+ *
+ * This function only sets up the device but does not wait for the
+ * domain to connect to the device and therefore cannot block on the
+ * guest.
+ *
+ * device is an in/out parameter: fields left unspecified when the
+ * structure is passed in are filled in with appropriate values for
+ * the device created.
+ *
+ * libxl_device_<type>_destroy(ctx, domid, device):
+ *
+ * Removes the given device from the specified domain without guest
+ * co-operation. It is guest specific what effect this will have on
+ * a running guest.
+ *
+ * This function does not interact with the guest and therefore
+ * cannot block on the guest.
+ *
+ * libxl_device_<type>_remove(ctx, domid, device):
+ *
+ * Removes the given device from the specified domain by performing
+ * an orderly unplug with guest co-operation. This requires that the
+ * guest is running.
+ *
+ * This method is currently synchronous and therefore can block
+ * while interacting with the guest. There is a time-out of 10s on
+ * this interaction after which libxl_device_<type>_destroy()
+ * semantics apply.
+ *
+ * libxl_device_<type>_safe_remove(ctx, domid, device):
+ *
+ * This has the same semantics as libxl_device_<type>_remove() but,
+ * in the event of hitting the 10s time-out, this function will fail.
+ *
+ * Controllers
+ * -----------
+ *
+ * Most devices are treated individually. Some classes of device,
+ * however, like USB or SCSI, inherently have the need to have a
+ * hierarchy of different levels, with lower-level devices "attached"
+ * to higher-level ones. USB for instance has "controllers" at the
+ * top, which have buses, on which are devices, which consist of
+ * multiple interfaces. SCSI has "hosts" at the top, then buses,
+ * targets, and LUNs.
+ *
+ * In that case, for each <class>, there will be a set of functions
+ * and types for each <level>. For example, for <class>=usb, there
+ * may be <levels> ctrl (controller) and dev (device), with ctrl being
+ * level 0.
+ *
+ * libxl_device_<class><level0>_<function> will act more or
+ * less like top-level non-bus devices: they will either create or
+ * accept a libxl_devid which will be unique within the
+ * <class><level0> libxl_devid namespace.
+ *
+ * Lower-level devices must have a unique way to be identified. One
+ * way to do this would be to name it via the name of the next level
+ * up plus an index; for instance, <ctrl devid, port number>. Another
+ * way would be to have another devid namespace for that level. This
+ * identifier will be used for queries and removals.
+ *
+ * Lower-level devices will include in their
+ * libxl_device_<class><level> struct a field referring to the unique
+ * index of the level above. For instance, libxl_device_usbdev might
+ * contain the controller devid.
+ *
+ * In the case where there are multiple different ways to implement a
+ * given device -- for instance, one which is fully PV and one which
+ * uses an emulator -- the controller will contain a field which
+ * specifies what type of implementation is used. The implementations
+ * of individual devices will be known by the controller to which they
+ * are attached.
+ *
+ * If libxl_device_<class><level>_add receives an empty reference to
+ * the level above, it may return an error. Or it may (but is not
+ * required to) automatically choose a suitable device in the level
+ * above to which to attach the new device at this level. It may also
+ * (but is not required to) automatically create a new device at the
+ * level above if no suitable devices exist. Each class should
+ * document its behavior.
+ *
+ * libxl_device_<class><level>_list will list all devices of <class>
+ * at <level> in the domain. For example, libxl_device_usbctrl_list
+ * will list all usb controllers; libxl_class_usbdev_list will list
+ * all usb devices across all controllers.
+ *
+ * For each class, the domain config file will contain a single list
+ * for each level. libxl will first iterate through the list of
+ * top-level devices, then iterate through each level down in turn,
+ * adding devices to devices in the level above. For instance, there
+ * will be one list for all usb controllers, and one list for all usb
+ * devices.
+ *
+ * If libxl_device_<class><level>_add automatically creates
+ * higher-level devices as necessary, then it is permissible for the
+ * higher-level lists to be empty and the device list to have devices
+ * with the field containing a reference to the higher level device
+ * uninitialized.
+ */
+
+/* Disks */
+int libxl_device_disk_add(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_disk *disk,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_disk_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_disk *disk,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_disk_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_disk *disk,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_disk_safe_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_disk *disk,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_disk *libxl_device_disk_list(libxl_ctx *ctx,
+ uint32_t domid, int *num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+void libxl_device_disk_list_free(libxl_device_disk* list, int num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_disk_getinfo(libxl_ctx *ctx, uint32_t domid,
+ const libxl_device_disk *disk, libxl_diskinfo *diskinfo)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/*
+ * Insert a CD-ROM device. A device corresponding to disk must already
+ * be attached to the guest.
+ */
+int libxl_cdrom_insert(libxl_ctx *ctx, uint32_t domid, libxl_device_disk *disk,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/*
+ * USB
+ *
+ * For each device removed or added, one of these protocols is available:
+ * - PV (i.e., PVUSB)
+ * - DEVICEMODEL (i.e, qemu)
+ *
+ * PV is available for either PV or HVM domains. DEVICEMODEL is only
+ * available for HVM domains. The caller can additionally specify
+ * "AUTO", in which case the library will try to determine the best
+ * protocol automatically.
+ *
+ * At the moment, the only protocol implemented is PV.
+ *
+ * One can add/remove USB controllers to/from guest, and attach/detach USB
+ * devices to/from USB controllers.
+ *
+ * To add USB controllers and USB devices, one can add USB controllers
+ * first and then attach USB devices to some USB controller, or add
+ * USB devices to the guest directly; in the latter case a USB
+ * controller is created automatically for the USB devices to attach to.
+ *
+ * To remove USB controllers or USB devices, one can remove USB devices
+ * under USB controller one by one and then remove USB controller, or
+ * remove USB controller directly, it will remove all USB devices under
+ * it automatically.
+ *
+ */
+/* USB Controllers*/
+int libxl_device_usbctrl_add(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_usbctrl *usbctrl,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+int libxl_device_usbctrl_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_usbctrl *usbctrl,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+int libxl_device_usbctrl_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_usbctrl *usbctrl,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_usbctrl *libxl_device_usbctrl_list(libxl_ctx *ctx,
+ uint32_t domid, int *num);
+
+void libxl_device_usbctrl_list_free(libxl_device_usbctrl *list, int nr);
+
+
+int libxl_device_usbctrl_getinfo(libxl_ctx *ctx, uint32_t domid,
+ const libxl_device_usbctrl *usbctrl,
+ libxl_usbctrlinfo *usbctrlinfo);
+
+/* USB Devices */
+
+int libxl_device_usbdev_add(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_usbdev *usbdev,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+int libxl_device_usbdev_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_usbdev *usbdev,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_usbdev *
+libxl_device_usbdev_list(libxl_ctx *ctx, uint32_t domid, int *num);
+
+void libxl_device_usbdev_list_free(libxl_device_usbdev *list, int nr);
+
+/* Network Interfaces */
+int libxl_device_nic_add(libxl_ctx *ctx, uint32_t domid, libxl_device_nic *nic,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_nic_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_nic *nic,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_nic_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_nic *nic,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_nic *libxl_device_nic_list(libxl_ctx *ctx,
+ uint32_t domid, int *num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+void libxl_device_nic_list_free(libxl_device_nic* list, int num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_nic_getinfo(libxl_ctx *ctx, uint32_t domid,
+ const libxl_device_nic *nic, libxl_nicinfo *nicinfo)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/*
+ * Virtual Channels
+ * Channels manifest as consoles with names, see docs/misc/channels.txt
+ */
+libxl_device_channel *libxl_device_channel_list(libxl_ctx *ctx,
+ uint32_t domid,
+ int *num);
+int libxl_device_channel_getinfo(libxl_ctx *ctx, uint32_t domid,
+ const libxl_device_channel *channel,
+ libxl_channelinfo *channelinfo);
+
+/* Virtual TPMs */
+int libxl_device_vtpm_add(libxl_ctx *ctx, uint32_t domid, libxl_device_vtpm *vtpm,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vtpm_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vtpm *vtpm,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vtpm_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vtpm *vtpm,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_vtpm *libxl_device_vtpm_list(libxl_ctx *ctx,
+ uint32_t domid, int *num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+void libxl_device_vtpm_list_free(libxl_device_vtpm*, int num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vtpm_getinfo(libxl_ctx *ctx, uint32_t domid,
+ const libxl_device_vtpm *vtpm, libxl_vtpminfo *vtpminfo)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* Virtual displays */
+int libxl_device_vdispl_add(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vdispl *displ,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vdispl_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vdispl *vdispl,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vdispl_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vdispl *vdispl,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_vdispl *libxl_device_vdispl_list(libxl_ctx *ctx,
+ uint32_t domid, int *num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+void libxl_device_vdispl_list_free(libxl_device_vdispl* list, int num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vdispl_getinfo(libxl_ctx *ctx, uint32_t domid,
+ const libxl_device_vdispl *vdispl,
+ libxl_vdisplinfo *vdisplinfo)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* Virtual sounds */
+int libxl_device_vsnd_add(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vsnd *vsnd,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vsnd_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vsnd *vsnd,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vsnd_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vsnd *vsnd,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_vsnd *libxl_device_vsnd_list(libxl_ctx *ctx,
+ uint32_t domid, int *num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+void libxl_device_vsnd_list_free(libxl_device_vsnd* list, int num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vsnd_getinfo(libxl_ctx *ctx, uint32_t domid,
+ const libxl_device_vsnd *vsnd,
+ libxl_vsndinfo *vsndlinfo)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* Keyboard */
+int libxl_device_vkb_add(libxl_ctx *ctx, uint32_t domid, libxl_device_vkb *vkb,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vkb_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vkb *vkb,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vkb_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vkb *vkb,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_vkb *libxl_device_vkb_list(libxl_ctx *ctx,
+ uint32_t domid, int *num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+void libxl_device_vkb_list_free(libxl_device_vkb* list, int num)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vkb_getinfo(libxl_ctx *ctx, uint32_t domid,
+ const libxl_device_vkb *vkb,
+ libxl_vkbinfo *vkbinfo)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* Framebuffer */
+int libxl_device_vfb_add(libxl_ctx *ctx, uint32_t domid, libxl_device_vfb *vfb,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vfb_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vfb *vfb,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_vfb_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_vfb *vfb,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* 9pfs */
+int libxl_device_p9_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_p9 *p9,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_p9_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_p9 *p9,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* pvcalls interface */
+int libxl_device_pvcallsif_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_pvcallsif *pvcallsif,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_pvcallsif_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_pvcallsif *pvcallsif,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* PCI Passthrough */
+int libxl_device_pci_add(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_pci *pcidev,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_pci_remove(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_pci *pcidev,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+int libxl_device_pci_destroy(libxl_ctx *ctx, uint32_t domid,
+ libxl_device_pci *pcidev,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+libxl_device_pci *libxl_device_pci_list(libxl_ctx *ctx, uint32_t domid,
+ int *num);
+
+/*
+ * Turns the current process into a backend device service daemon
+ * for a driver domain.
+ *
+ * From a libxl API point of view, this starts a long-running
+ * operation. That operation consists of "being a driver domain"
+ * and never completes.
+ *
+ * Attempting to abort this operation is not advisable; proper
+ * shutdown of the driver domain task is not supported.
+ */
+int libxl_device_events_handler(libxl_ctx *ctx,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/*
+ * Functions related to making devices assignable -- that is, bound to
+ * the pciback driver, ready to be given to a guest via
+ * libxl_pci_device_add.
+ *
+ * - ..._add() will unbind the device from its current driver (if
+ * already bound) and re-bind it to pciback; at that point it will be
+ * ready to be assigned to a VM. If rebind is set, it will store the
+ * path to the old driver in xenstore so that it can be handed back to
+ * dom0 on restore.
+ *
+ * - ..._remove() will unbind the device from pciback, and if
+ * rebind is non-zero, attempt to assign it back to the driver
+ * from whence it came.
+ *
+ * - ..._list() will return a list of the PCI devices available to be
+ * assigned.
+ *
+ * add and remove are idempotent: if the device in question is already
+ * added or is not bound, the functions will emit a warning but return
+ * SUCCESS.
+ */
+int libxl_device_pci_assignable_add(libxl_ctx *ctx, libxl_device_pci *pcidev, int rebind);
+int libxl_device_pci_assignable_remove(libxl_ctx *ctx, libxl_device_pci *pcidev, int rebind);
+libxl_device_pci *libxl_device_pci_assignable_list(libxl_ctx *ctx, int *num);
+
+/* CPUID handling */
+int libxl_cpuid_parse_config(libxl_cpuid_policy_list *cpuid, const char* str);
+int libxl_cpuid_parse_config_xend(libxl_cpuid_policy_list *cpuid,
+ const char* str);
+#if LIBXL_API_VERSION < 0x041400
+/*
+ * Dropped from the API in Xen 4.14. At the time of writing, these functions
+ * don't appear to ever have had external callers.
+ *
+ * These have always been used internally during domain construction, and
+ * can't easily be used externally because of their implicit parameters in
+ * other pieces of global state.
+ *
+ * Furthermore, an API user can't usefully determine whether they get
+ * libxl_cpuid (the real implementation) or libxl_nocpuid (no-op stubs).
+ *
+ * The internal behaviour of these functions also needs to change. Therefore
+ * for simplicity, provide the no-op stubs. Yes technically this is an API
+ * change in some cases for existing software, but there is 0 of that in
+ * practice.
+ */
+static inline void libxl_cpuid_apply_policy(libxl_ctx *ctx __attribute__((unused)),
+ uint32_t domid __attribute__((unused)))
+{}
+static inline void libxl_cpuid_set(libxl_ctx *ctx __attribute__((unused)),
+ uint32_t domid __attribute__((unused)),
+ libxl_cpuid_policy_list cpuid __attribute__((unused)))
+{}
+#endif
+
+/*
+ * Functions for allowing users of libxl to store private data
+ * relating to a domain. The data is an opaque sequence of bytes and
+ * is not interpreted or used by libxl.
+ *
+ * Data is indexed by the userdata userid, which is a short printable
+ * ASCII string. The following list is a registry of userdata userids
+ * (the registry may be updated by posting a patch to xen-devel):
+ *
+ * userid Data contents
+ * "xl" domain config file in xl format, Unix line endings
+ * "libvirt-xml" domain config file in libvirt XML format. See
+ * http://libvirt.org/formatdomain.html
+ * "domain-userdata-lock" lock file to protect domain userdata in libxl.
+ * It's a per-domain lock. Applications should
+ * not touch this file.
+ * "libxl-json" libxl_domain_config object in JSON format, generated
+ * by libxl. Applications should not access this file
+ * directly. This file is protected by domain-userdata-lock
+ * against Read-Modify-Write operations and domain
+ * destruction.
+ *
+ * libxl does not enforce the registration of userdata userids or the
+ * semantics of the data. For specifications of the data formats
+ * see the code or documentation for the libxl caller in question.
+ */
+int libxl_userdata_store(libxl_ctx *ctx, uint32_t domid,
+ const char *userdata_userid,
+ const uint8_t *data, int datalen)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+ /* If datalen==0, data is not used and the user data for
+ * that domain and userdata_userid is deleted. */
+int libxl_userdata_retrieve(libxl_ctx *ctx, uint32_t domid,
+ const char *userdata_userid,
+ uint8_t **data_r, int *datalen_r)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+ /* On successful return, *data_r is from malloc.
+ * If there is no data for that domain and userdata_userid,
+ * *data_r and *datalen_r will be set to 0.
+ * data_r and datalen_r may be 0.
+ * On error return, *data_r and *datalen_r are undefined.
+ */
+int libxl_userdata_unlink(libxl_ctx *ctx, uint32_t domid,
+ const char *userdata_userid);
+
+
+int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo);
+int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
+ const libxl_bitmap *cpumap_hard,
+ const libxl_bitmap *cpumap_soft);
+int libxl_set_vcpuaffinity_force(libxl_ctx *ctx, uint32_t domid,
+ uint32_t vcpuid,
+ const libxl_bitmap *cpumap_hard,
+ const libxl_bitmap *cpumap_soft);
+int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
+ unsigned int max_vcpus,
+ const libxl_bitmap *cpumap_hard,
+ const libxl_bitmap *cpumap_soft);
+
+#if defined (LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040500
+
+#define libxl_set_vcpuaffinity(ctx, domid, vcpuid, map) \
+ libxl_set_vcpuaffinity((ctx), (domid), (vcpuid), (map), NULL)
+#define libxl_set_vcpuaffinity_all(ctx, domid, max_vcpus, map) \
+ libxl_set_vcpuaffinity_all((ctx), (domid), (max_vcpus), (map), NULL)
+
+#endif
+
+int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
+ libxl_bitmap *nodemap);
+int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
+ libxl_bitmap *nodemap);
+int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid,
+ libxl_bitmap *cpumap,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
+static inline int libxl_set_vcpuonline_0x041200(libxl_ctx *ctx,
+ uint32_t domid,
+ libxl_bitmap *cpumap)
+{
+ return libxl_set_vcpuonline(ctx, domid, cpumap, NULL);
+}
+#define libxl_set_vcpuonline libxl_set_vcpuonline_0x041200
+#endif
+
+/* A return value less than 0 should be interpreted as a libxl_error, while a
+ * return value greater than or equal to 0 should be interpreted as a
+ * libxl_scheduler. */
+int libxl_get_scheduler(libxl_ctx *ctx);
+
+/* Per-scheduler parameters */
+int libxl_sched_credit_params_get(libxl_ctx *ctx, uint32_t poolid,
+ libxl_sched_credit_params *scinfo);
+int libxl_sched_credit_params_set(libxl_ctx *ctx, uint32_t poolid,
+ libxl_sched_credit_params *scinfo);
+int libxl_sched_credit2_params_get(libxl_ctx *ctx, uint32_t poolid,
+ libxl_sched_credit2_params *scinfo);
+int libxl_sched_credit2_params_set(libxl_ctx *ctx, uint32_t poolid,
+ libxl_sched_credit2_params *scinfo);
+
+/* Scheduler Per-domain parameters */
+
+#define LIBXL_DOMAIN_SCHED_PARAM_WEIGHT_DEFAULT -1
+#define LIBXL_DOMAIN_SCHED_PARAM_CAP_DEFAULT -1
+#define LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT -1
+#define LIBXL_DOMAIN_SCHED_PARAM_SLICE_DEFAULT -1
+#define LIBXL_DOMAIN_SCHED_PARAM_LATENCY_DEFAULT -1
+#define LIBXL_DOMAIN_SCHED_PARAM_EXTRATIME_DEFAULT -1
+#define LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT -1
+
+/* Per-VCPU parameters */
+#define LIBXL_SCHED_PARAM_VCPU_INDEX_DEFAULT -1
+
+/* Get the per-domain scheduling parameters.
+ * For schedulers that support per-vcpu settings (e.g., RTDS),
+ * calling *_domain_get functions will get default scheduling
+ * parameters.
+ */
+int libxl_domain_sched_params_get(libxl_ctx *ctx, uint32_t domid,
+ libxl_domain_sched_params *params);
+
+/* Set the per-domain scheduling parameters.
+ * For schedulers that support per-vcpu settings (e.g., RTDS),
+ * calling *_domain_set functions will set all vcpus with the same
+ * scheduling parameters.
+ */
+int libxl_domain_sched_params_set(libxl_ctx *ctx, uint32_t domid,
+ const libxl_domain_sched_params *params);
+
+/* Get the per-vcpu scheduling parameters */
+int libxl_vcpu_sched_params_get(libxl_ctx *ctx, uint32_t domid,
+ libxl_vcpu_sched_params *params);
+
+/* Get the per-vcpu scheduling parameters of all vcpus of a domain */
+int libxl_vcpu_sched_params_get_all(libxl_ctx *ctx, uint32_t domid,
+ libxl_vcpu_sched_params *params);
+
+/* Set the per-vcpu scheduling parameters */
+int libxl_vcpu_sched_params_set(libxl_ctx *ctx, uint32_t domid,
+ const libxl_vcpu_sched_params *params);
+
+/* Set the per-vcpu scheduling parameters of all vcpus of a domain */
+int libxl_vcpu_sched_params_set_all(libxl_ctx *ctx, uint32_t domid,
+ const libxl_vcpu_sched_params *params);
+
+int libxl_send_trigger(libxl_ctx *ctx, uint32_t domid,
+ libxl_trigger trigger, uint32_t vcpuid,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
+static inline int libxl_send_trigger_0x041200(
+ libxl_ctx *ctx, uint32_t domid, libxl_trigger trigger, uint32_t vcpuid)
+{
+ return libxl_send_trigger(ctx, domid, trigger, vcpuid, NULL);
+}
+#define libxl_send_trigger libxl_send_trigger_0x041200
+#endif
+int libxl_send_sysrq(libxl_ctx *ctx, uint32_t domid, char sysrq);
+int libxl_send_debug_keys(libxl_ctx *ctx, char *keys);
+int libxl_set_parameters(libxl_ctx *ctx, char *params);
+
+typedef struct libxl__xen_console_reader libxl_xen_console_reader;
+
+libxl_xen_console_reader *
+ libxl_xen_console_read_start(libxl_ctx *ctx, int clear);
+int libxl_xen_console_read_line(libxl_ctx *ctx,
+ libxl_xen_console_reader *cr,
+ char **line_r);
+void libxl_xen_console_read_finish(libxl_ctx *ctx,
+ libxl_xen_console_reader *cr);
+
+uint32_t libxl_vm_get_start_time(libxl_ctx *ctx, uint32_t domid);
+
+char *libxl_tmem_list(libxl_ctx *ctx, uint32_t domid, int use_long);
+int libxl_tmem_freeze(libxl_ctx *ctx, uint32_t domid);
+int libxl_tmem_thaw(libxl_ctx *ctx, uint32_t domid);
+int libxl_tmem_set(libxl_ctx *ctx, uint32_t domid, char* name,
+ uint32_t set);
+int libxl_tmem_shared_auth(libxl_ctx *ctx, uint32_t domid, char* uuid,
+ int auth);
+int libxl_tmem_freeable(libxl_ctx *ctx);
+
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap);
+
+/*
+ * Set poolid to LIBXL_CPUPOOL_POOLID_ANY to have Xen choose a
+ * free poolid for you.
+ */
+#define LIBXL_CPUPOOL_POOLID_ANY 0xFFFFFFFF
+int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
+ libxl_scheduler sched,
+ libxl_bitmap cpumap, libxl_uuid *uuid,
+ uint32_t *poolid);
+int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid);
+int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid);
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
+int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
+ const libxl_bitmap *cpumap);
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
+int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
+int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
+ const libxl_bitmap *cpumap);
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+int libxl_cpupool_info(libxl_ctx *ctx, libxl_cpupoolinfo *info, uint32_t poolid);
+
+int libxl_domid_valid_guest(uint32_t domid);
+
+int libxl_flask_context_to_sid(libxl_ctx *ctx, char *buf, size_t len,
+ uint32_t *ssidref);
+int libxl_flask_sid_to_context(libxl_ctx *ctx, uint32_t ssidref, char **buf,
+ size_t *len);
+int libxl_flask_getenforce(libxl_ctx *ctx);
+int libxl_flask_setenforce(libxl_ctx *ctx, int mode);
+int libxl_flask_loadpolicy(libxl_ctx *ctx, void *policy, uint32_t size);
+
+int libxl_ms_vm_genid_generate(libxl_ctx *ctx, libxl_ms_vm_genid *id);
+bool libxl_ms_vm_genid_is_zero(const libxl_ms_vm_genid *id);
+void libxl_ms_vm_genid_copy(libxl_ctx *ctx, libxl_ms_vm_genid *dst,
+ const libxl_ms_vm_genid *src);
+
+#if defined(__i386__) || defined(__x86_64__)
+int libxl_psr_cmt_attach(libxl_ctx *ctx, uint32_t domid);
+int libxl_psr_cmt_detach(libxl_ctx *ctx, uint32_t domid);
+int libxl_psr_cmt_domain_attached(libxl_ctx *ctx, uint32_t domid);
+int libxl_psr_cmt_enabled(libxl_ctx *ctx);
+int libxl_psr_cmt_get_total_rmid(libxl_ctx *ctx, uint32_t *total_rmid);
+int libxl_psr_cmt_get_l3_cache_size(libxl_ctx *ctx,
+ uint32_t socketid,
+ uint32_t *l3_cache_size);
+int libxl_psr_cmt_get_cache_occupancy(libxl_ctx *ctx,
+ uint32_t domid,
+ uint32_t socketid,
+ uint32_t *l3_cache_occupancy);
+
+int libxl_psr_cmt_type_supported(libxl_ctx *ctx, libxl_psr_cmt_type type);
+int libxl_psr_cmt_get_sample(libxl_ctx *ctx,
+ uint32_t domid,
+ libxl_psr_cmt_type type,
+ uint64_t scope,
+ uint64_t *sample_r,
+ uint64_t *tsc_r);
+
+/*
+ * Function to set a domain's cbm. It operates on a single or multiple
+ * target(s) defined in 'target_map'. The definition of 'target_map' is
+ * related to 'type':
+ * 'L3_CBM': 'target_map' specifies all the sockets to be operated on.
+ */
+int libxl_psr_cat_set_cbm(libxl_ctx *ctx, uint32_t domid,
+ libxl_psr_cbm_type type, libxl_bitmap *target_map,
+ uint64_t cbm);
+/*
+ * Function to get a domain's cbm. It operates on a single 'target'.
+ * The definition of 'target' is related to 'type':
+ * 'L3_CBM': 'target' specifies which socket to be operated on.
+ */
+int libxl_psr_cat_get_cbm(libxl_ctx *ctx, uint32_t domid,
+ libxl_psr_cbm_type type, uint32_t target,
+ uint64_t *cbm_r);
+
+/*
+ * On success, the function returns an array of elements in 'info',
+ * and the length in 'nr'.
+ */
+int libxl_psr_cat_get_info(libxl_ctx *ctx, libxl_psr_cat_info **info,
+ unsigned int *nr, unsigned int lvl);
+int libxl_psr_cat_get_l3_info(libxl_ctx *ctx, libxl_psr_cat_info **info,
+ int *nr);
+void libxl_psr_cat_info_list_free(libxl_psr_cat_info *list, int nr);
+
+typedef enum libxl_psr_cbm_type libxl_psr_type;
+
+/*
+ * Function to set a domain's value. It operates on a single or multiple
+ * target(s) defined in 'target_map'. 'target_map' specifies all the sockets
+ * to be operated on.
+ */
+int libxl_psr_set_val(libxl_ctx *ctx, uint32_t domid,
+ libxl_psr_type type, libxl_bitmap *target_map,
+ uint64_t val);
+/*
+ * Function to get a domain's value. It operates on a single 'target'.
+ * 'target' specifies which socket to be operated on.
+ */
+int libxl_psr_get_val(libxl_ctx *ctx, uint32_t domid,
+ libxl_psr_type type, unsigned int target,
+ uint64_t *val);
+/*
+ * On success, the function returns an array of elements in 'info',
+ * and the length in 'nr'.
+ */
+int libxl_psr_get_hw_info(libxl_ctx *ctx, libxl_psr_feat_type type,
+ unsigned int lvl, unsigned int *nr,
+ libxl_psr_hw_info **info);
+void libxl_psr_hw_info_list_free(libxl_psr_hw_info *list, unsigned int nr);
+#endif
+
+/* misc */
+
+/* Each of these sets or clears the flag according to whether the
+ * 2nd parameter is nonzero. On failure, they log, and
+ * return ERROR_FAIL, but also leave errno valid. */
+int libxl_fd_set_cloexec(libxl_ctx *ctx, int fd, int cloexec);
+int libxl_fd_set_nonblock(libxl_ctx *ctx, int fd, int nonblock);
+
+/*
+ * Issue a qmp monitor command to the device model of the specified domain.
+ * The function returns the output of the command in a new allocated buffer
+ * via output.
+ */
+int libxl_qemu_monitor_command(libxl_ctx *ctx, uint32_t domid,
+ const char *command_line, char **output,
+ const libxl_asyncop_how *ao_how)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
+static inline int libxl_qemu_monitor_command_0x041200(libxl_ctx *ctx,
+ uint32_t domid, const char *command_line, char **output)
+{
+ return libxl_qemu_monitor_command(ctx, domid, command_line, output,
+ NULL);
+}
+#define libxl_qemu_monitor_command libxl_qemu_monitor_command_0x041200
+#endif
+
+#include <libxl_event.h>
+
+/*
+ * This function is for use only during host initialisation. If it is
+ * invoked on a host with running domains, or concurrent libxl
+ * processes then the system may malfunction.
+ */
+int libxl_clear_domid_history(libxl_ctx *ctx);
+
+#endif /* LIBXL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * Copyright (C) 2011 Citrix Ltd.
+ * Author Ian Jackson <ian.jackson@eu.citrix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef LIBXL_EVENT_H
+#define LIBXL_EVENT_H
+
+#include <libxl.h>
+#include <poll.h>
+#include <sys/time.h>
+
+/*======================================================================*/
+
+/*
+ * Domain event handling - getting Xen events from libxl
+ *
+ * (Callers inside libxl may not call libxl_event_check or _wait.)
+ */
+
+#define LIBXL_EVENTMASK_ALL (~(unsigned long)0)
+
+typedef int libxl_event_predicate(const libxl_event*, void *user);
+ /* Return value is 0 if the event is unwanted or non-0 if it is.
+ * Predicates are not allowed to fail.
+ */
+
+int libxl_event_check(libxl_ctx *ctx, libxl_event **event_r,
+ uint64_t typemask,
+ libxl_event_predicate *predicate, void *predicate_user)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+ /* Searches for an event, already-happened, which matches typemask
+ * and predicate. predicate==0 matches any event.
+ * libxl_event_check returns the event, which must then later be
+ * freed by the caller using libxl_event_free.
+ *
+ * Returns ERROR_NOT_READY if no such event has happened.
+ */
+
+int libxl_event_wait(libxl_ctx *ctx, libxl_event **event_r,
+ uint64_t typemask,
+ libxl_event_predicate *predicate, void *predicate_user)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+ /* Like libxl_event_check but blocks if no suitable events are
+ * available, until some are. Uses libxl_osevent_beforepoll/
+ * _afterpoll so may be inefficient if very many domains are being
+ * handled by a single program.
+ */
+
+void libxl_event_free(libxl_ctx *ctx, libxl_event *event);
+
+
+/* Alternatively or additionally, the application may also use this: */
+
+typedef struct libxl_event_hooks {
+ uint64_t event_occurs_mask;
+ void (*event_occurs)(void *user,
+#ifndef LIBXL_HAVE_NONCONST_EVENT_OCCURS_EVENT_ARG
+ const
+#endif
+ libxl_event *event);
+ void (*disaster)(void *user, libxl_event_type type,
+ const char *msg, int errnoval);
+} libxl_event_hooks;
+
+void libxl_event_register_callbacks(libxl_ctx *ctx,
+ const libxl_event_hooks *hooks, void *user);
+ /*
+ * Arranges that libxl will henceforth call event_occurs for any
+ * events whose type is set in event_occurs_mask, rather than
+ * queueing the event for retrieval by libxl_event_check/wait.
+ * Events whose bit is clear in mask are not affected.
+ *
+ * event becomes owned by the application and must be freed, either
+ * by event_occurs or later.
+ *
+ * event_occurs may be NULL if mask is 0.
+ *
+ * libxl_event_register_callback also provides a way for libxl to
+ * report to the application that there was a problem reporting
+ * events; this can occur due to lack of host memory during event
+ * handling, or other wholly unrecoverable errors from system calls
+ * made by libxl. This will not happen for frivolous reasons - only
+ * if the system, or the Xen components of it, are badly broken.
+ *
+ * msg and errnoval will describe the action that libxl was trying
+ * to do, and type specifies the type of libxl events which may be
+ * missing. type may be 0 in which case events of all types may be
+ * missing.
+ *
+ * disaster may be NULL. If it is, or if _register_callbacks has
+ * not been called, errors of this kind are fatal to the entire
+ * application: libxl will print messages to its logs and to stderr
+ * and call exit(-1).
+ *
+ * If disaster returns, it may be the case that some or all future
+ * libxl calls will return errors; likewise it may be the case that
+ * no more events (of the specified type, if applicable) can be
+ * produced. An application which supplies a disaster function
+ * should normally react either by exiting, or by (when it has
+ * returned to its main event loop) shutting down libxl with
+ * libxl_ctx_free and perhaps trying to restart it with
+ * libxl_ctx_init.
+ *
+ * In any case before calling disaster, libxl will have logged a
+ * message with level XTL_CRITICAL.
+ *
+ * Reentrancy: it IS permitted to call libxl from within
+ * event_occurs. It is NOT permitted to call libxl from within
+ * disaster. The event_occurs and disaster callbacks may occur on
+ * any thread in which the application calls libxl.
+ *
+ * libxl_event_register_callbacks may be called as many times, with
+ * different parameters, as the application likes; the most recent
+ * call determines the libxl behaviour. However it is NOT safe to
+ * call _register_callbacks concurrently with, or reentrantly from,
+ * any other libxl function, nor while any event-generation
+ * facilities are enabled.
+ */
+
+
+/*
+ * Events are only generated if they have been requested.
+ * The following functions request the generation of specific events.
+ *
+ * Each set of functions for controlling event generation has this form:
+ *
+ * typedef struct libxl__evgen_FOO libxl__evgen_FOO;
+ * int libxl_evenable_FOO(libxl_ctx *ctx, FURTHER PARAMETERS,
+ * libxl_ev_user user, libxl__evgen_FOO **evgen_out);
+ * void libxl_evdisable_FOO(libxl_ctx *ctx, libxl__evgen_FOO *evgen);
+ *
+ * The evenable function arranges that the events (as described in the
+ * doc comment for the individual function) will start to be generated
+ * by libxl. On success, *evgen_out is set to a non-null pointer to
+ * an opaque struct.
+ *
+ * The user value is returned in the generated events and may be
+ * used by the caller for whatever it likes. The type ev_user is
+ * guaranteed to be an unsigned integer type which is at least
+ * as big as uint64_t and is also guaranteed to be big enough to
+ * contain any intptr_t value.
+ *
+ * If it becomes desirable to stop generation of the relevant events,
+ * or to reclaim the resources in libxl associated with the evgen
+ * structure, the same evgen value should be passed to the evdisable
+ * function. However, note that events which occurred prior to the
+ * evdisable call may still be returned.
+ *
+ * The caller may enable identical events more than once. If they do
+ * so, each actual occurrence will generate several events to be
+ * returned by libxl_event_check, with the appropriate user value(s).
+ * Aside from this, each occurrence of each event is returned by
+ * libxl_event_check exactly once.
+ *
+ * An evgen is associated with the libxl_ctx used for its creation.
+ * After libxl_ctx_free, all corresponding evgen handles become
+ * invalid and must no longer be passed to evdisable.
+ *
+ * Applications should ensure that they eventually retrieve every
+ * event using libxl_event_check or libxl_event_wait, since events
+ * which occur but are not retrieved by the application will be queued
+ * inside libxl indefinitely. libxl_event_check/_wait may be O(n)
+ * where n is the number of queued events which do not match the
+ * criteria specified in the arguments to check/wait.
+ */
+
+typedef struct libxl__evgen_domain_death libxl_evgen_domain_death;
+int libxl_evenable_domain_death(libxl_ctx *ctx, uint32_t domid,
+ libxl_ev_user, libxl_evgen_domain_death **evgen_out);
+void libxl_evdisable_domain_death(libxl_ctx *ctx, libxl_evgen_domain_death*);
+ /* Arranges for the generation of DOMAIN_SHUTDOWN and DOMAIN_DEATH
+ * events. A domain which is destroyed before it shuts down
+ * may generate only a DEATH event.
+ */
+
+typedef struct libxl__evgen_disk_eject libxl_evgen_disk_eject;
+int libxl_evenable_disk_eject(libxl_ctx *ctx, uint32_t domid, const char *vdev,
+ libxl_ev_user, libxl_evgen_disk_eject **evgen_out);
+void libxl_evdisable_disk_eject(libxl_ctx *ctx, libxl_evgen_disk_eject*);
+ /* Arranges for the generation of DISK_EJECT events. A copy of the
+ * string *vdev will be made for libxl's internal use, and a pointer
+ * to this (or some other) copy will be returned as the vdev
+ * member of event.u.
+ */
+
+
+/*======================================================================*/
+
+/*
+ * OS event handling - passing low-level OS events to libxl
+ *
+ * Event-driven programs must use these facilities to allow libxl
+ * to become aware of readability/writeability of file descriptors
+ * and the occurrence of timeouts.
+ *
+ * There are two approaches available. The first is appropriate for
+ * simple programs handling reasonably small numbers of domains:
+ *
+ * for (;;) {
+ * libxl_osevent_beforepoll(...)
+ * poll();
+ * libxl_osevent_afterpoll(...);
+ * for (;;) {
+ * r = libxl_event_check(...);
+ * if (r==ERROR_NOT_READY) break;
+ * if (r) goto error_out;
+ * do something with the event;
+ * }
+ * }
+ *
+ * The second approach uses libxl_osevent_register_hooks and is
+ * suitable for programs which are already using a callback-based
+ * event library.
+ *
+ * An application may freely mix the two styles of interaction.
+ *
+ * (Callers inside libxl may not call libxl_osevent_... functions.)
+ */
+
+struct pollfd;
+
+/* The caller should provide beforepoll with some space for libxl's
+ * fds, and tell libxl how much space is available by setting *nfds_io.
+ * fds points to the start of this space (and fds may be a pointer into
+ * a larger array, for example, if the application has some fds of
+ * its own that it is interested in).
+ *
+ * On return *nfds_io will in any case have been updated by libxl
+ * according to how many fds libxl wants to poll on.
+ *
+ * If the space was sufficient, libxl fills in fds[0..<new
+ * *nfds_io>] suitably for poll(2), updates *timeout_upd if needed,
+ * and returns ok.
+ *
+ * If space was insufficient, fds[0..<old *nfds_io>] is undefined on
+ * return; *nfds_io on return will be greater than the value on
+ * entry; *timeout_upd may or may not have been updated; and
+ * libxl_osevent_beforepoll returns ERROR_BUFFERFULL. In this case
+ * the application needs to make more space (enough space for
+ * *nfds_io struct pollfd) and then call beforepoll again, before
+ * entering poll(2). Typically this will involve calling realloc.
+ *
+ * The application may call beforepoll with fds==NULL and
+ * *nfds_io==0 in order to find out how much space is needed.
+ *
+ * *timeout_upd is as for poll(2): it's in milliseconds, and
+ * negative values mean no timeout (infinity).
+ * libxl_osevent_beforepoll will only reduce the timeout, naturally.
+ */
+int libxl_osevent_beforepoll(libxl_ctx *ctx, int *nfds_io,
+ struct pollfd *fds, int *timeout_upd,
+ struct timeval now)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* nfds and fds[0..nfds] must be from the most recent call to
+ * _beforepoll, as modified by poll. (It is therefore not possible
+ * to have multiple threads simultaneously polling using this
+ * interface.)
+ *
+ * This function actually performs all of the IO and other actions,
+ * and generates events (libxl_event), which are implied by either
+ * (a) the time of day or (b) both (i) the returned information from
+ * _beforepoll, and (ii) the results from poll specified in
+ * fds[0..nfds-1]. Generated events can then be retrieved by
+ * libxl_event_check.
+ */
+void libxl_osevent_afterpoll(libxl_ctx *ctx, int nfds, const struct pollfd *fds,
+ struct timeval now)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+
+typedef struct libxl_osevent_hooks {
+ int (*fd_register)(void *user, int fd, void **for_app_registration_out,
+ short events, void *for_libxl);
+ int (*fd_modify)(void *user, int fd, void **for_app_registration_update,
+ short events);
+ void (*fd_deregister)(void *user, int fd, void *for_app_registration);
+ int (*timeout_register)(void *user, void **for_app_registration_out,
+ struct timeval abs, void *for_libxl);
+ int (*timeout_modify)(void *user, void **for_app_registration_update,
+ struct timeval abs)
+ /* only ever called with abs={0,0}, meaning ASAP */;
+ void (*timeout_deregister)(void *user, void *for_app_registration)
+ /* will never be called */;
+} libxl_osevent_hooks;
+
+/* The application which calls register_fd_hooks promises to
+ * maintain a register of fds and timeouts that libxl is interested
+ * in, and make calls into libxl (libxl_osevent_occurred_*)
+ * when those fd events and timeouts occur. This is more efficient
+ * than _beforepoll/_afterpoll if there are many fds (which can
+ * happen if the same libxl application is managing many domains).
+ *
+ * For an fd event, events is as for poll(). register or modify may
+ * be called with events==0, in which case it must still work
+ * normally, just not generate any events.
+ *
+ * For a timeout event, milliseconds is as for poll().
+ * Specifically, negative values of milliseconds mean NO TIMEOUT.
+ * This is used by libxl to temporarily disable a timeout.
+ *
+ * If the register or modify hook succeeds it may update
+ * *for_app_registration_out/_update and must then return 0.
+ * On entry to register, *for_app_registration_out is always NULL.
+ *
+ * A registration or modification hook may fail, in which case it
+ * must leave the registration state of the fd or timeout unchanged.
+ * It may then either return ERROR_OSEVENT_REG_FAIL or any positive
+ * int. The value returned will be passed up through libxl and
+ * eventually returned back to the application. When register
+ * fails, any value stored into *for_registration_out is ignored by
+ * libxl; when modify fails, any changed value stored into
+ * *for_registration_update is honoured by libxl and will be passed
+ * to future modify or deregister calls.
+ *
+ * libxl may want to register more than one callback for any one fd;
+ * in that case: (i) each such registration will have at least one bit
+ * set in revents which is unique to that registration; (ii) if an
+ * event occurs which is relevant for multiple registrations the
+ * application's event system may call libxl_osevent_occurred_fd
+ * for one, some, or all of those registrations.
+ *
+ * If fd_modify is used, it is permitted for the application's event
+ * system to still make calls to libxl_osevent_occurred_fd for the
+ * "old" set of requested events; these will be safely ignored by
+ * libxl.
+ *
+ * libxl will remember the value stored in *for_app_registration_out
+ * (or *for_app_registration_update) by a successful call to
+ * register (or modify), and pass it to subsequent calls to modify
+ * or deregister.
+ *
+ * Note that the application must cope with a call from libxl to
+ * timeout_modify racing with its own call to
+ * libxl__osevent_occurred_timeout. libxl guarantees that
+ * timeout_modify will only be called with abs={0,0} but the
+ * application must still ensure that libxl's attempt to cause the
+ * timeout to occur immediately is safely ignored even if the timeout is
+ * actually already in the process of occurring.
+ *
+ * timeout_deregister is not used because it forms part of a
+ * deprecated unsafe mode of use of the API.
+ *
+ * osevent_register_hooks may be called only once for each libxl_ctx.
+ * libxl may make calls to register/modify/deregister from within
+ * any libxl function (indeed, it will usually call register from
+ * register_event_hooks). Conversely, the application MUST NOT make
+ * the event occurrence calls (libxl_osevent_occurred_*) into libxl
+ * reentrantly from within libxl (for example, from within the
+ * register/modify functions).
+ *
+ * Lock hierarchy: the register/modify/deregister functions may be
+ * called with locks held. These locks (the "libxl internal locks")
+ * are inside the libxl_ctx. Therefore, if those register functions
+ * acquire any locks of their own ("caller register locks") outside
+ * libxl, to avoid deadlock one of the following must hold for each
+ * such caller register lock:
+ * (a) "acquire libxl internal locks before caller register lock":
+ * No libxl function may be called with the caller register
+ * lock held.
+ * (b) "acquire caller register lock before libxl internal locks":
+ * No libxl function may be called _without_ the caller
+ * register lock held.
+ * Of these we would normally recommend (a).
+ *
+ * The value *hooks is not copied and must outlast the libxl_ctx.
+ */
+void libxl_osevent_register_hooks(libxl_ctx *ctx,
+ const libxl_osevent_hooks *hooks,
+ void *user);
+
+/* It is NOT legal to call _occurred_ reentrantly within any libxl
+ * function. Specifically it is NOT legal to call it from within
+ * a register callback. Conversely, libxl MAY call register/deregister
+ * from within libxl_osevent_occurred_*.
+ */
+
+void libxl_osevent_occurred_fd(libxl_ctx *ctx, void *for_libxl,
+ int fd, short events, short revents)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/* Implicitly, on entry to this function the timeout has been
+ * deregistered. If _occurred_timeout is called, libxl will not
+ * call timeout_deregister; if it wants to requeue the timeout it
+ * will call timeout_register again.
+ */
+void libxl_osevent_occurred_timeout(libxl_ctx *ctx, void *for_libxl)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+
+/*======================================================================*/
+
+/*
+ * Subprocess handling.
+ *
+ * Unfortunately the POSIX interface makes this very awkward.
+ *
+ * There are two possible arrangements for collecting statuses from
+ * wait/waitpid.
+ *
+ * For naive programs:
+ *
+ * libxl will keep a SIGCHLD handler installed whenever it has an
+ * active (unreaped) child. It will reap all children with
+ * wait(); any children it does not recognise will be passed to
+ * the application via an optional callback (and will result in
+ * logged warnings if no callback is provided or the callback
+ * denies responsibility for the child).
+ *
+ * libxl may have children whenever:
+ *
+ * - libxl is performing an operation which can be made
+ * asynchronous; ie one taking a libxl_asyncop_how, even
+ * if NULL is passed indicating that the operation is
+ * synchronous; or
+ *
+ * - events of any kind are being generated, as requested
+ * by libxl_evenable_....
+ *
+ * A multithreaded application which is naive in this sense may
+ * block SIGCHLD on some of its threads, but there must be at
+ * least one thread that has SIGCHLD unblocked. libxl will not
+ * modify the blocking flag for SIGCHLD (except that it may create
+ * internal service threads with all signals blocked).
+ *
+ * A naive program must have, at any one time, only
+ * one libxl context which might have children.
+ *
+ * For programs which run their own children alongside libxl's:
+ *
+ * A program which does this must call libxl_childproc_setmode.
+ * There are three options:
+ *
+ * libxl_sigchld_owner_libxl:
+ *
+ * While any libxl operation which might use child processes
+ * is running, works like libxl_sigchld_owner_libxl_always;
+ * but, deinstalls the handler the rest of the time.
+ *
+ * In this mode, the application, while it uses any libxl
+ * operation which might create or use child processes (see
+ * above):
+ * - Must not have any child processes running.
+ * - Must not install a SIGCHLD handler.
+ * - Must not reap any children.
+ *
+ * This is the default (i.e. if setmode is not called, or 0 is
+ * passed for hooks).
+ *
+ * libxl_sigchld_owner_mainloop:
+ *
+ * The application must install a SIGCHLD handler and reap (at
+ * least) all of libxl's children and pass their exit status to
+ * libxl by calling libxl_childproc_exited. (If the application
+ * has multiple libxl ctx's, it must call libxl_childproc_exited
+ * on each ctx.)
+ *
+ * libxl_sigchld_owner_libxl_always:
+ *
+ * The application expects this libxl ctx to reap all of the
+ * process's children, and provides a callback to be notified of
+ * their exit statuses. The application must have only one
+ * libxl_ctx configured this way.
+ *
+ * libxl_sigchld_owner_libxl_always_selective_reap:
+ *
+ * The application expects to reap all of its own children
+ * synchronously, and does not use SIGCHLD. libxl is to install
+ * a SIGCHLD handler. The application may have multiple
+ * libxl_ctxs configured this way; in which case all of its ctxs
+ * must be so configured.
+ */
+
+
+typedef enum {
+ /* libxl owns SIGCHLD whenever it has a child, and reaps
+ * all children, including those not spawned by libxl. */
+ libxl_sigchld_owner_libxl,
+
+ /* Application promises to discover when SIGCHLD occurs and call
+ * libxl_childproc_exited or libxl_childproc_sigchld_occurred (but
+ * NOT from within a signal handler). libxl will not itself
+ * arrange to (un)block or catch SIGCHLD. */
+ libxl_sigchld_owner_mainloop,
+
+ /* libxl owns SIGCHLD all the time, and the application is
+ * relying on libxl's event loop for reaping its children too. */
+ libxl_sigchld_owner_libxl_always,
+
+ /* libxl owns SIGCHLD all the time, but it must only reap its own
+ * children. The application will reap its own children
+ * synchronously with waitpid, without the assistance of SIGCHLD. */
+ libxl_sigchld_owner_libxl_always_selective_reap,
+} libxl_sigchld_owner;
+
+typedef struct {
+ libxl_sigchld_owner chldowner;
+
+ /* All of these are optional: */
+
+ /* Called by libxl instead of fork. Should behave exactly like
+ * fork, including setting errno etc. May NOT reenter into libxl.
+ * Application may use this to discover pids of libxl's children,
+ * for example.
+ */
+ pid_t (*fork_replacement)(void *user);
+
+ /* With libxl_sigchld_owner_libxl, called by libxl when it has
+ * reaped a pid. (Not permitted with _owner_mainloop.)
+ *
+ * Should return 0 if the child was recognised by the application
+ * (or if the application does not keep those kind of records),
+ * ERROR_UNKNOWN_CHILD if the application knows that the child is not
+ * the application's; if it returns another error code it is a
+ * disaster as described for libxl_event_register_callbacks.
+ * (libxl will report unexpected children to its error log.)
+ *
+ * If not supplied, the application is assumed not to start
+ * any children of its own.
+ *
+ * This function is NOT called from within the signal handler.
+ * Rather it will be called from inside a libxl's event handling
+ * code and thus only when libxl is running, for example from
+ * within libxl_event_wait. (libxl uses the self-pipe trick
+ * to implement this.)
+ *
+ * reaped_callback may call back into libxl, but it
+ * is best to avoid making long-running libxl calls as that might
+ * stall the calling event loop while the nested operation
+ * completes.
+ */
+ int (*reaped_callback)(pid_t, int status, void *user);
+} libxl_childproc_hooks;
+
+/* hooks may be 0, which is equivalent to &{ libxl_sigchld_owner_libxl, 0, 0 }
+ *
+ * May not be called when libxl might have any child processes, or the
+ * behaviour is undefined. So it is best to call this at
+ * initialisation.
+ *
+ * The value *hooks is not copied and must outlast the libxl_ctx.
+ */
+void libxl_childproc_setmode(libxl_ctx *ctx, const libxl_childproc_hooks *hooks,
+ void *user);
+
+/*
+ * This function is for an application which owns SIGCHLD and which
+ * reaps all of the process's children, and dispatches the exit status
+ * to the correct place inside the application.
+ *
+ * May be called only by an application which has called setmode with
+ * chldowner == libxl_sigchld_owner_mainloop. If pid was a process started
+ * by this instance of libxl, returns 0 after doing whatever
+ * processing is appropriate. Otherwise silently returns
+ * ERROR_UNKNOWN_CHILD. No other error returns are possible.
+ *
+ * May NOT be called from within a signal handler which might
+ * interrupt any libxl operation. The application will almost
+ * certainly need to use the self-pipe trick (or a working pselect or
+ * ppoll) to implement this.
+ */
+int libxl_childproc_reaped(libxl_ctx *ctx, pid_t, int status)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+/*
+ * This function is for an application which owns SIGCHLD but which
+ * doesn't keep track of all of its own children in a manner suitable
+ * for reaping all of them and then dispatching them.
+ *
+ * Such an application must notify libxl, by calling this
+ * function, that a SIGCHLD occurred. libxl will then check all its
+ * children, reap any that are ready, and take any action necessary -
+ * but it will not reap anything else.
+ *
+ * May be called only by an application which has called setmode with
+ * chldowner == libxl_sigchld_owner_mainloop.
+ *
+ * May NOT be called from within a signal handler which might
+ * interrupt any libxl operation (just like libxl_childproc_reaped).
+ */
+void libxl_childproc_sigchld_occurred(libxl_ctx *ctx)
+ LIBXL_EXTERNAL_CALLERS_ONLY;
+
+
+/*
+ * An application which initialises a libxl_ctx in a parent process
+ * and then forks a child which does not quickly exec, must
+ * instead call libxl_postfork_child_noexec in the child. One call
+ * on any existing (or specially made) ctx is sufficient; after
+ * this all previously existing libxl_ctx's are invalidated and
+ * must not be used - or even freed. It is harmless to call this
+ * postfork function and then exec anyway.
+ *
+ * Until libxl_postfork_child_noexec has returned:
+ * - No other libxl calls may be made.
+ * - If any libxl ctx was configured to handle the process's SIGCHLD,
+ * the child may not create further (grand)child processes, nor
+ * manipulate SIGCHLD.
+ *
+ * libxl_postfork_child_noexec may not reclaim all the resources
+ * associated with the libxl ctx. This includes but is not limited
+ * to: ordinary memory; files on disk and in /var/run; file
+ * descriptors; memory mapped into the process from domains being
+ * managed (grant maps); Xen event channels. Use of libxl in
+ * processes which fork long-lived children is not recommended for
+ * this reason. libxl_postfork_child_noexec is provided so that
+ * an application can make further libxl calls in a child which
+ * is going to exec or exit soon.
+ */
+void libxl_postfork_child_noexec(libxl_ctx *ctx);
+
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * Copyright (C) 2011 Citrix Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef LIBXL_JSON_H
+#define LIBXL_JSON_H
+
+#include <yajl/yajl_gen.h>
+#include <yajl/yajl_parse.h>
+
+#ifdef HAVE_YAJL_YAJL_VERSION_H
+# include <yajl/yajl_version.h>
+#endif
+
+yajl_gen_status libxl__uint64_gen_json(yajl_gen hand, uint64_t val);
+yajl_gen_status libxl_defbool_gen_json(yajl_gen hand, libxl_defbool *p);
+yajl_gen_status libxl_uuid_gen_json(yajl_gen hand, libxl_uuid *p);
+yajl_gen_status libxl_mac_gen_json(yajl_gen hand, libxl_mac *p);
+yajl_gen_status libxl_bitmap_gen_json(yajl_gen hand, libxl_bitmap *p);
+yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
+ libxl_cpuid_policy_list *p);
+yajl_gen_status libxl_string_list_gen_json(yajl_gen hand, libxl_string_list *p);
+yajl_gen_status libxl_key_value_list_gen_json(yajl_gen hand,
+ libxl_key_value_list *p);
+yajl_gen_status libxl_hwcap_gen_json(yajl_gen hand, libxl_hwcap *p);
+yajl_gen_status libxl_ms_vm_genid_gen_json(yajl_gen hand, libxl_ms_vm_genid *p);
+
+#include <_libxl_types_json.h>
+
+/* YAJL version check */
+#if defined(YAJL_MAJOR) && (YAJL_MAJOR > 1)
+# define HAVE_YAJL_V2 1
+#endif
+
+#ifdef HAVE_YAJL_V2
+
+typedef size_t libxl_yajl_length;
+
+static inline yajl_handle libxl__yajl_alloc(const yajl_callbacks *callbacks,
+ yajl_alloc_funcs *allocFuncs,
+ void *ctx)
+{
+ yajl_handle hand = yajl_alloc(callbacks, allocFuncs, ctx);
+ if (hand)
+ yajl_config(hand, yajl_allow_trailing_garbage, 1);
+ return hand;
+}
+
+static inline yajl_gen libxl_yajl_gen_alloc(const yajl_alloc_funcs *allocFuncs)
+{
+ yajl_gen g;
+ g = yajl_gen_alloc(allocFuncs);
+ if (g)
+ yajl_gen_config(g, yajl_gen_beautify, 1);
+ return g;
+}
+
+#else /* !HAVE_YAJL_V2 */
+
+#define yajl_complete_parse yajl_parse_complete
+
+typedef unsigned int libxl_yajl_length;
+
+static inline yajl_handle libxl__yajl_alloc(const yajl_callbacks *callbacks,
+ const yajl_alloc_funcs *allocFuncs,
+ void *ctx)
+{
+ yajl_parser_config cfg = {
+ .allowComments = 1,
+ .checkUTF8 = 1,
+ };
+ return yajl_alloc(callbacks, &cfg, allocFuncs, ctx);
+}
+
+static inline yajl_gen libxl_yajl_gen_alloc(const yajl_alloc_funcs *allocFuncs)
+{
+ yajl_gen_config conf = { 1, " " };
+ return yajl_gen_alloc(&conf, allocFuncs);
+}
+
+#endif /* !HAVE_YAJL_V2 */
+
+yajl_gen_status libxl_domain_config_gen_json(yajl_gen hand,
+ libxl_domain_config *p);
+
+#endif /* LIBXL_JSON_H */
--- /dev/null
+/*
+ * Copyright (C) 2009 Citrix Ltd.
+ * Author Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef LIBXL_UTILS_H
+#define LIBXL_UTILS_H
+
+#include "libxl.h"
+
+#ifndef LIBXL_HAVE_NONCONST_LIBXL_BASENAME_RETURN_VALUE
+const
+#endif
+char *libxl_basename(const char *name); /* returns string from strdup */
+
+unsigned long libxl_get_required_shadow_memory(unsigned long maxmem_kb, unsigned int smp_cpus);
+ /* deprecated; see LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONFIG in libxl.h */
+int libxl_name_to_domid(libxl_ctx *ctx, const char *name, uint32_t *domid);
+int libxl_domain_qualifier_to_domid(libxl_ctx *ctx, const char *name, uint32_t *domid);
+char *libxl_domid_to_name(libxl_ctx *ctx, uint32_t domid);
+int libxl_cpupool_qualifier_to_cpupoolid(libxl_ctx *ctx, const char *p,
+ uint32_t *poolid_r,
+ int *was_name_r);
+int libxl_name_to_cpupoolid(libxl_ctx *ctx, const char *name, uint32_t *poolid);
+char *libxl_cpupoolid_to_name(libxl_ctx *ctx, uint32_t poolid);
+int libxl_cpupoolid_is_valid(libxl_ctx *ctx, uint32_t poolid);
+int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid);
+int libxl_is_stubdom(libxl_ctx *ctx, uint32_t domid, uint32_t *target_domid);
+int libxl_create_logfile(libxl_ctx *ctx, const char *name, char **full_name);
+int libxl_string_to_backend(libxl_ctx *ctx, char *s, libxl_disk_backend *backend);
+
+int libxl_read_file_contents(libxl_ctx *ctx, const char *filename,
+ void **data_r, int *datalen_r);
+ /* Reads the contents of the plain file filename into a mallocd
+ * buffer. Returns 0 or errno. Any errors other than ENOENT are logged.
+ * If the file is empty, *data_r and *datalen_r are set to 0.
+ * On error, *data_r and *datalen_r are unchanged.
+ * data_r and/or datalen_r may be 0.
+ */
+
+int libxl_read_exactly(libxl_ctx *ctx, int fd, void *data, ssize_t sz,
+ const char *filename, const char *what);
+int libxl_write_exactly(libxl_ctx *ctx, int fd, const void *data,
+ ssize_t sz, const char *filename, const char *what);
+ /* Returns 0 or errno. If file is truncated on reading, returns
+ * EPROTO and you have no way to tell how much was read. Errors are
+ * logged using filename (which is only used for logging) and what
+ * (which may be 0). */
+
+int libxl_pipe(libxl_ctx *ctx, int pipes[2]);
+ /* Just like pipe(2), but log errors. */
+
+void libxl_report_child_exitstatus(libxl_ctx *ctx, xentoollog_level,
+ const char *what, pid_t pid, int status);
+ /* treats all exit statuses as errors; if that's not what you want,
+ * check status yourself first */
+
+int libxl_mac_to_device_nic(libxl_ctx *ctx, uint32_t domid,
+ const char *mac, libxl_device_nic *nic);
+int libxl_devid_to_device_nic(libxl_ctx *ctx, uint32_t domid, int devid,
+ libxl_device_nic *nic);
+
+int libxl_vdev_to_device_disk(libxl_ctx *ctx, uint32_t domid, const char *vdev,
+ libxl_device_disk *disk);
+
+int libxl_uuid_to_device_vtpm(libxl_ctx *ctx, uint32_t domid,
+ libxl_uuid *uuid, libxl_device_vtpm *vtpm);
+int libxl_devid_to_device_vtpm(libxl_ctx *ctx, uint32_t domid,
+ int devid, libxl_device_vtpm *vtpm);
+int libxl_devid_to_device_usbctrl(libxl_ctx *ctx, uint32_t domid,
+ int devid, libxl_device_usbctrl *usbctrl);
+
+int libxl_devid_to_device_vkb(libxl_ctx *ctx, uint32_t domid,
+ int devid, libxl_device_vkb *vkb);
+
+int libxl_devid_to_device_vdispl(libxl_ctx *ctx, uint32_t domid,
+ int devid, libxl_device_vdispl *vdispl);
+
+int libxl_devid_to_device_vsnd(libxl_ctx *ctx, uint32_t domid,
+ int devid, libxl_device_vsnd *vsnd);
+
+int libxl_ctrlport_to_device_usbdev(libxl_ctx *ctx, uint32_t domid,
+ int ctrl, int port,
+ libxl_device_usbdev *usbdev);
+
+int libxl_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *bitmap, int n_bits);
+ /* Allocated bimap is from malloc, libxl_bitmap_dispose() to be
+ * called by the application when done. */
+void libxl_bitmap_copy_alloc(libxl_ctx *ctx, libxl_bitmap *dptr,
+ const libxl_bitmap *sptr);
+void libxl_bitmap_copy(libxl_ctx *ctx, libxl_bitmap *dptr,
+ const libxl_bitmap *sptr);
+int libxl_bitmap_is_full(const libxl_bitmap *bitmap);
+int libxl_bitmap_is_empty(const libxl_bitmap *bitmap);
+int libxl_bitmap_test(const libxl_bitmap *bitmap, int bit);
+void libxl_bitmap_set(libxl_bitmap *bitmap, int bit);
+void libxl_bitmap_reset(libxl_bitmap *bitmap, int bit);
+int libxl_bitmap_count_set(const libxl_bitmap *bitmap);
+int libxl_bitmap_or(libxl_ctx *ctx, libxl_bitmap *or_map,
+ const libxl_bitmap *map1,
+ const libxl_bitmap *map2);
+int libxl_bitmap_and(libxl_ctx *ctx, libxl_bitmap *and_map,
+ const libxl_bitmap *map1,
+ const libxl_bitmap *map2);
+char *libxl_bitmap_to_hex_string(libxl_ctx *ctx, const libxl_bitmap *bitmap);
+static inline void libxl_bitmap_set_any(libxl_bitmap *bitmap)
+{
+ memset(bitmap->map, -1, bitmap->size);
+}
+static inline void libxl_bitmap_set_none(libxl_bitmap *bitmap)
+{
+ memset(bitmap->map, 0, bitmap->size);
+}
+static inline int libxl_bitmap_cpu_valid(libxl_bitmap *bitmap, int bit)
+{
+ return bit >= 0 && bit < (bitmap->size * 8);
+}
+#define libxl_for_each_bit(var, map) for (var = 0; var < (map).size * 8; var++)
+#define libxl_for_each_set_bit(v, m) for (v = 0; v < (m).size * 8; v++) \
+ if (libxl_bitmap_test(&(m), v))
+
+/*
+ * Compares two bitmaps bit by bit, up to nr_bits or, if nr_bits is 0, up
+ * to the size of the largest bitmap. If sizes does not match, bits past the
+ * end of a bitmap are considered as being 0, which matches the semantics
+ * and implementation of libxl_bitmap_test().
+ *
+ * So, basically, [0,1,0] and [0,1] are considered equal, while [0,1,1] and
+ * [0,1] are different.
+ */
+static inline int libxl_bitmap_equal(const libxl_bitmap *ba,
+ const libxl_bitmap *bb,
+ int nr_bits)
+{
+ int i;
+
+ if (nr_bits == 0)
+ nr_bits = ba->size > bb->size ? ba->size * 8 : bb->size * 8;
+
+ for (i = 0; i < nr_bits; i++) {
+ if (libxl_bitmap_test(ba, i) != libxl_bitmap_test(bb, i))
+ return 0;
+ }
+ return 1;
+}
+
+int libxl_cpu_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *cpumap, int max_cpus);
+int libxl_node_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *nodemap,
+ int max_nodes);
+int libxl_socket_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *socketmap,
+ int max_sockets);
+/* Fill socketmap with the CPU topology information on the system. */
+int libxl_get_online_socketmap(libxl_ctx *ctx, libxl_bitmap *socketmap);
+
+/* Populate cpumap with the cpus spanned by the nodes in nodemap */
+int libxl_nodemap_to_cpumap(libxl_ctx *ctx,
+ const libxl_bitmap *nodemap,
+ libxl_bitmap *cpumap);
+/* Populate cpumap with the cpus spanned by node */
+int libxl_node_to_cpumap(libxl_ctx *ctx, int node,
+ libxl_bitmap *cpumap);
+/* Populate nodemap with the nodes of the cpus in cpumap */
+int libxl_cpumap_to_nodemap(libxl_ctx *ctx,
+ const libxl_bitmap *cpumap,
+ libxl_bitmap *nodemap);
+
+ static inline uint32_t libxl__sizekb_to_mb(uint32_t s) {
+ return (s + 1023) / 1024;
+}
+
+void libxl_string_copy(libxl_ctx *ctx, char **dst, char * const*src);
+
+
+#define LIBXL_FILLZERO(object) (memset(&(object), 0, sizeof((object))))
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * Copyright (C) 2008,2010 Citrix Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef __LIBXL_UUID_H__
+#define __LIBXL_UUID_H__
+
+#define LIBXL_UUID_FMT "%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"
+#define LIBXL_UUID_FMTLEN ((2*16)+4) /* 16 hex bytes plus 4 hyphens */
+#define LIBXL__UUID_BYTES(uuid) uuid[0], uuid[1], uuid[2], uuid[3], \
+ uuid[4], uuid[5], uuid[6], uuid[7], \
+ uuid[8], uuid[9], uuid[10], uuid[11], \
+ uuid[12], uuid[13], uuid[14], uuid[15]
+#define LIBXL_UUID_BYTES(arg) LIBXL__UUID_BYTES((arg).uuid)
+
+typedef struct {
+ /* UUID as an octet stream in big-endian byte-order. */
+ unsigned char uuid[16];
+} libxl_uuid;
+
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040700
+#if defined(__linux__)
+
+#include <uuid/uuid.h>
+#include <stdint.h>
+
+#elif defined(__FreeBSD__) || defined(__NetBSD__)
+
+#include <uuid.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+#else
+
+#error "Please update libxl_uuid.h for your OS"
+
+#endif
+#endif
+
+int libxl_uuid_is_nil(const libxl_uuid *uuid);
+void libxl_uuid_generate(libxl_uuid *uuid);
+int libxl_uuid_from_string(libxl_uuid *uuid, const char *in);
+void libxl_uuid_copy(libxl_ctx *ctx_opt, libxl_uuid *dst,
+ const libxl_uuid *src);
+#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040500
+static inline void libxl_uuid_copy_0x040400(libxl_uuid *dst,
+ const libxl_uuid *src)
+{
+ libxl_uuid_copy(NULL, dst, src);
+}
+#define libxl_uuid_copy libxl_uuid_copy_0x040400
+#endif
+
+void libxl_uuid_clear(libxl_uuid *uuid);
+int libxl_uuid_compare(const libxl_uuid *uuid1, const libxl_uuid *uuid2);
+const uint8_t *libxl_uuid_bytearray_const(const libxl_uuid *uuid);
+uint8_t *libxl_uuid_bytearray(libxl_uuid *uuid);
+
+#endif /* __LIBXL_UUID_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * Copyright (C) 2010 Citrix Ltd.
+ * Author Ian Jackson <ian.jackson@eu.citrix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef LIBXLUTIL_H
+#define LIBXLUTIL_H
+
+#include <stdio.h>
+
+#include "libxl.h"
+
+enum XLU_ConfigValueType {
+ XLU_STRING,
+ XLU_LIST,
+};
+
+enum XLU_Operation {
+ XLU_OP_ASSIGNMENT = 0,
+ XLU_OP_ADDITION,
+};
+
+/* Unless otherwise stated, all functions return an errno value. */
+typedef struct XLU_Config XLU_Config;
+typedef struct XLU_ConfigList XLU_ConfigList;
+typedef struct XLU_ConfigValue XLU_ConfigValue;
+
+XLU_Config *xlu_cfg_init(FILE *report, const char *report_filename);
+ /* 0 means we got ENOMEM. */
+ /* report_filename is copied; report is saved and must remain valid
+ * until the Config is destroyed. */
+
+int xlu_cfg_readfile(XLU_Config*, const char *real_filename);
+int xlu_cfg_readdata(XLU_Config*, const char *data, int length);
+ /* If these fail, then it is undefined behaviour to call xlu_cfg_get_...
+ * functions. You have to just xlu_cfg_destroy. */
+
+void xlu_cfg_destroy(XLU_Config*);
+
+
+/* All of the following print warnings to "report" if there is a problem.
+ * Return values are:
+ * 0 OK
+ * ESRCH not defined
+ * EINVAL value found but wrong format for request (prints warning unless dont_warn=true)
+ * ERANGE value out of range (from strtol)
+ */
+
+int xlu_cfg_get_string(const XLU_Config*, const char *n, const char **value_r,
+ int dont_warn);
+/* free/strdup version */
+int xlu_cfg_replace_string(const XLU_Config *cfg, const char *n,
+ char **value_r, int dont_warn);
+int xlu_cfg_get_long(const XLU_Config*, const char *n, long *value_r,
+ int dont_warn);
+int xlu_cfg_get_bounded_long(const XLU_Config*, const char *n, long min,
+ long max, long *value_r, int dont_warn);
+int xlu_cfg_get_defbool(const XLU_Config*, const char *n, libxl_defbool *b,
+ int dont_warn);
+
+int xlu_cfg_get_list(const XLU_Config*, const char *n,
+ XLU_ConfigList **list_r /* may be 0 */,
+ int *entries_r /* may be 0 */,
+ int dont_warn);
+ /* there is no need to free *list_r; lifetime is that of the XLU_Config */
+int xlu_cfg_get_list_as_string_list(const XLU_Config *cfg, const char *n,
+ libxl_string_list *sl, int dont_warn);
+const char *xlu_cfg_get_listitem(const XLU_ConfigList*, int entry);
+ /* xlu_cfg_get_listitem cannot fail, except that if entry is
+ * out of range it returns 0 (not setting errno) */
+
+enum XLU_ConfigValueType xlu_cfg_value_type(const XLU_ConfigValue *value);
+int xlu_cfg_value_get_string(const XLU_Config *cfg, XLU_ConfigValue *value,
+ char **value_r, int dont_warn);
+int xlu_cfg_value_get_list(const XLU_Config *cfg, XLU_ConfigValue *value,
+ XLU_ConfigList **value_r, int dont_warn);
+XLU_ConfigValue *xlu_cfg_get_listitem2(const XLU_ConfigList *list,
+ int entry);
+
+/*
+ * Disk specification parsing.
+ */
+
+int xlu_disk_parse(XLU_Config *cfg, int nspecs, const char *const *specs,
+ libxl_device_disk *disk);
+ /* disk must have been initialised.
+ *
+ * On error, returns errno value. Bad strings cause EINVAL and
+ * print a message to cfg's report (that's all cfg is used for).
+ *
+ * Normally one would pass nspecs==1 and only specs[0]. But it is
+ * permitted to pass more strings in which case each is parsed as a
+ * string containing a collection of parameters (but they all refer
+ * to the configuration for a single disk).
+ *
+ * nspecs==0 is permitted but since it does not specify some mandatory
+ * properties, it produces a run-time configuration error if the
+ * resulting disk struct is used with libxl.
+ */
+
+/*
+ * PCI specification parsing
+ */
+int xlu_pci_parse_bdf(XLU_Config *cfg, libxl_device_pci *pcidev, const char *str);
+
+/*
+ * RDM parsing
+ */
+int xlu_rdm_parse(XLU_Config *cfg, libxl_rdm_reserve *rdm, const char *str);
+
+/*
+ * Vif rate parsing.
+ */
+
+int xlu_vif_parse_rate(XLU_Config *cfg, const char *rate,
+ libxl_device_nic *nic);
+
+#endif /* LIBXLUTIL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef XENCALL_H
+#define XENCALL_H
+
+/*
+ * This library allows you to make arbitrary hypercalls (subject to
+ * sufficient permission for the process and the domain itself). Note
+ * that while the library interface is stable the hypercalls are
+ * subject to their own rules.
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+
+/* Callers who don't care don't need to #include <xentoollog.h> */
+struct xentoollog_logger;
+
+typedef struct xencall_handle xencall_handle;
+
+/*
+ */
+#define XENCALL_OPENFLAG_NON_REENTRANT (1U<<0)
+
+/*
+ * Return a handle onto the hypercall driver. Logs errors.
+ *
+ * Note: After fork(2) a child process must not use any opened
+ * xencall handle inherited from their parent, nor access any
+ * hypercall argument buffers associated with that handle.
+ *
+ * The child must open a new handle if they want to interact with
+ * xencall.
+ *
+ * Calling exec(2) in a child will safely (and reliably) reclaim any
+ * resources which were allocated via a xencall_handle in the parent.
+ *
+ * A child which does not call exec(2) may safely call xencall_close()
+ * on a xencall_handle inherited from their parent. This will attempt
+ * to reclaim any resources associated with that handle. Note that in
+ * some implementations this reclamation may not be completely
+ * effective, in this case any affected resources remain allocated.
+ *
+ * Calling xencall_close() is the only safe operation on a
+ * xencall_handle which has been inherited.
+ */
+xencall_handle *xencall_open(struct xentoollog_logger *logger,
+ unsigned open_flags);
+
+/*
+ * Close a handle previously allocated with xencall_open().
+ *
+ * Under normal circumstances (i.e. not in the child after a fork) any
+ * allocated hypercall argument buffers should be freed using the
+ * appropriate xencall_free_*() prior to closing the handle in order
+ * to free up resources associated with those mappings.
+ *
+ * This is the only function which may be safely called on a
+ * xencall_handle in a child after a fork. xencall_free_*() must not
+ * be called under such circumstances.
+ */
+int xencall_close(xencall_handle *xcall);
+
+/*
+ * Return the fd used internally by xencall. selecting on it is not
+ * useful. But it could be useful for unusual use cases; perhaps,
+ * passing to other programs, calling ioctls on directly, or maybe
+ * calling fcntl.
+ */
+int xencall_fd(xencall_handle *xcall);
+
+/*
+ * Call hypercalls with varying numbers of arguments.
+ *
+ * On success the return value of the hypercall is the return value of
+ * the xencall function. On error these functions set errno and
+ * return -1.
+ *
+ * The errno values will be either:
+ * - The Xen hypercall error return (from xen/include/public/errno.h)
+ * translated into the corresponding local value for that POSIX error.
+ * - An errno value produced by the OS driver or the library
+ * implementation. Such values may be defined by POSIX or by the OS.
+ *
+ * Note that under some circumstances it will not be possible to tell
+ * whether an error came from Xen or from the OS/library.
+ *
+ * These functions never log.
+ */
+int xencall0(xencall_handle *xcall, unsigned int op);
+int xencall1(xencall_handle *xcall, unsigned int op,
+ uint64_t arg1);
+int xencall2(xencall_handle *xcall, unsigned int op,
+ uint64_t arg1, uint64_t arg2);
+int xencall3(xencall_handle *xcall, unsigned int op,
+ uint64_t arg1, uint64_t arg2, uint64_t arg3);
+int xencall4(xencall_handle *xcall, unsigned int op,
+ uint64_t arg1, uint64_t arg2, uint64_t arg3,
+ uint64_t arg4);
+int xencall5(xencall_handle *xcall, unsigned int op,
+ uint64_t arg1, uint64_t arg2, uint64_t arg3,
+ uint64_t arg4, uint64_t arg5);
+
+/*
+ * Allocate and free memory which is suitable for use as a pointer
+ * argument to a hypercall.
+ */
+void *xencall_alloc_buffer_pages(xencall_handle *xcall, size_t nr_pages);
+void xencall_free_buffer_pages(xencall_handle *xcall, void *p, size_t nr_pages);
+
+void *xencall_alloc_buffer(xencall_handle *xcall, size_t size);
+void xencall_free_buffer(xencall_handle *xcall, void *p);
+
+/*
+ * Are allocated hypercall buffers safe to be accessed by the hypervisor all
+ * the time?
+ * Returns 0 if EFAULT might be possible.
+ */
+int xencall_buffers_never_fault(xencall_handle *xcall);
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/******************************************************************************
+ * xenctrl.h
+ *
+ * A library for low-level access to the Xen control interfaces.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XENCTRL_H
+#define XENCTRL_H
+
+/* Tell the Xen public headers we are a user-space tools build. */
+#ifndef __XEN_TOOLS__
+#define __XEN_TOOLS__ 1
+#endif
+
+#include <unistd.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <xen/xen.h>
+#include <xen/domctl.h>
+#include <xen/physdev.h>
+#include <xen/sysctl.h>
+#include <xen/version.h>
+#include <xen/event_channel.h>
+#include <xen/sched.h>
+#include <xen/memory.h>
+#include <xen/grant_table.h>
+#include <xen/hvm/dm_op.h>
+#include <xen/hvm/params.h>
+#include <xen/xsm/flask_op.h>
+#include <xen/kexec.h>
+#include <xen/platform.h>
+
+#include "xentoollog.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+#include <xen/foreign/x86_32.h>
+#include <xen/foreign/x86_64.h>
+#include <xen/arch-x86/xen-mca.h>
+#endif
+
+#define XC_PAGE_SHIFT 12
+#define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
+#define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
+
+#define INVALID_MFN (~0UL)
+
+/*
+ * DEFINITIONS FOR CPU BARRIERS
+ */
+
+#define xen_barrier() asm volatile ( "" : : : "memory")
+
+#if defined(__i386__)
+#define xen_mb() asm volatile ( "lock addl $0, -4(%%esp)" ::: "memory" )
+#define xen_rmb() xen_barrier()
+#define xen_wmb() xen_barrier()
+#elif defined(__x86_64__)
+#define xen_mb() asm volatile ( "lock addl $0, -32(%%rsp)" ::: "memory" )
+#define xen_rmb() xen_barrier()
+#define xen_wmb() xen_barrier()
+#elif defined(__arm__)
+#define xen_mb() asm volatile ("dmb" : : : "memory")
+#define xen_rmb() asm volatile ("dmb" : : : "memory")
+#define xen_wmb() asm volatile ("dmb" : : : "memory")
+#elif defined(__aarch64__)
+#define xen_mb() asm volatile ("dmb sy" : : : "memory")
+#define xen_rmb() asm volatile ("dmb sy" : : : "memory")
+#define xen_wmb() asm volatile ("dmb sy" : : : "memory")
+#else
+#error "Define barriers"
+#endif
+
+
+#define XENCTRL_HAS_XC_INTERFACE 1
+/* In Xen 4.0 and earlier, xc_interface_open and xc_evtchn_open would
+ * both return ints being the file descriptor. In 4.1 and later, they
+ * return an xc_interface* and xc_evtchn*, respectively - ie, a
+ * pointer to an opaque struct. This #define is provided in 4.1 and
+ * later, allowing out-of-tree callers to more easily distinguish
+ * between, and be compatible with, both versions.
+ */
+
+
+/*
+ * GENERAL
+ *
+ * Unless otherwise specified, each function here returns zero or a
+ * non-null pointer on success; or in case of failure, sets errno and
+ * returns -1 or a null pointer.
+ *
+ * Unless otherwise specified, errors result in a call to the error
+ * handler function, which by default prints a message to the
+ * FILE* passed as the caller_data, which by default is stderr.
+ * (This is described below as "logging errors".)
+ *
+ * The error handler can safely trash errno, as libxc saves it across
+ * the callback.
+ */
+
+typedef struct xc_interface_core xc_interface;
+
+enum xc_error_code {
+ XC_ERROR_NONE = 0,
+ XC_INTERNAL_ERROR = 1,
+ XC_INVALID_KERNEL = 2,
+ XC_INVALID_PARAM = 3,
+ XC_OUT_OF_MEMORY = 4,
+ /* new codes need to be added to xc_error_level_to_desc too */
+};
+
+typedef enum xc_error_code xc_error_code;
+
+
+/*
+ * INITIALIZATION FUNCTIONS
+ */
+
+/**
+ * This function opens a handle to the hypervisor interface. This function can
+ * be called multiple times within a single process. Multiple processes can
+ * have an open hypervisor interface at the same time.
+ *
+ * Note:
+ * After fork a child process must not use any opened xc interface
+ * handle inherited from their parent. They must open a new handle if
+ * they want to interact with xc.
+ *
+ * Each call to this function should have a corresponding call to
+ * xc_interface_close().
+ *
+ * This function can fail if the caller does not have superuser permission or
+ * if a Xen-enabled kernel is not currently running.
+ *
+ * @return a handle to the hypervisor interface
+ */
+xc_interface *xc_interface_open(xentoollog_logger *logger,
+ xentoollog_logger *dombuild_logger,
+ unsigned open_flags);
+ /* if logger==NULL, will log to stderr
+     * if dombuild_logger==NULL, will log to a file
+ */
+
+/*
+ * Note: if XC_OPENFLAG_NON_REENTRANT is passed then libxc must not be
+ * called reentrantly and the calling application is responsible for
+ * providing mutual exclusion surrounding all libxc calls itself.
+ *
+ * In particular xc_{get,clear}_last_error only remain valid for the
+ * duration of the critical section containing the call which failed.
+ */
+enum xc_open_flags {
+    XC_OPENFLAG_DUMMY = 1<<0, /* do not actually open a xenctrl interface */
+    XC_OPENFLAG_NON_REENTRANT = 1<<1, /* assume library is only ever called from a single thread */
+};
+
+/**
+ * This function closes an open hypervisor interface.
+ *
+ * This function can fail if the handle does not represent an open interface or
+ * if there were problems closing the interface. In the latter case
+ * the interface is still closed.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @return 0 on success, -1 otherwise.
+ */
+int xc_interface_close(xc_interface *xch);
+
+/**
+ * Return the handles which xch has opened and will use for
+ * hypercalls, foreign memory accesses and device model operations.
+ * These may be used with the corresponding libraries so long as the
+ * xch itself remains open.
+ */
+struct xencall_handle *xc_interface_xcall_handle(xc_interface *xch);
+struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch);
+struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch);
+
+/*
+ * HYPERCALL SAFE MEMORY BUFFER
+ *
+ * Ensure that memory which is passed to a hypercall has been
+ * specially allocated in order to be safe to access from the
+ * hypervisor.
+ *
+ * Each user data pointer is shadowed by an xc_hypercall_buffer data
+ * structure. You should never define an xc_hypercall_buffer type
+ * directly, instead use the DECLARE_HYPERCALL_BUFFER* macros below.
+ *
+ * The structure should be considered opaque and all access should be
+ * via the macros and helper functions defined below.
+ *
+ * Once the buffer is declared the user is responsible for explicitly
+ * allocating and releasing the memory using
+ * xc_hypercall_buffer_alloc(_pages) and
+ * xc_hypercall_buffer_free(_pages).
+ *
+ * Once the buffer has been allocated the user can initialise the data
+ * via the normal pointer. The xc_hypercall_buffer structure is
+ * transparently referenced by the helper macros (such as
+ * xen_set_guest_handle) in order to check at compile time that the
+ * correct type of memory is being used.
+ */
+struct xc_hypercall_buffer {
+ /* Hypercall safe memory buffer. */
+ void *hbuf;
+
+ /*
+ * Reference to xc_hypercall_buffer passed as argument to the
+ * current function.
+ */
+ struct xc_hypercall_buffer *param_shadow;
+
+ /*
+ * Direction of copy for bounce buffering.
+ */
+ int dir;
+
+ /* Used iff dir != 0. */
+ void *ubuf;
+ size_t sz;
+};
+typedef struct xc_hypercall_buffer xc_hypercall_buffer_t;
+
+/*
+ * Construct the name of the hypercall buffer for a given variable.
+ * For internal use only
+ */
+#define XC__HYPERCALL_BUFFER_NAME(_name) xc__hypercall_buffer_##_name
+
+/*
+ * Returns the hypercall_buffer associated with a variable.
+ */
+#define HYPERCALL_BUFFER(_name) \
+ ({ xc_hypercall_buffer_t _hcbuf_buf1; \
+ typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_buf2 = \
+ &XC__HYPERCALL_BUFFER_NAME(_name); \
+ (void)(&_hcbuf_buf1 == _hcbuf_buf2); \
+ (_hcbuf_buf2)->param_shadow ? \
+ (_hcbuf_buf2)->param_shadow : (_hcbuf_buf2); \
+ })
+
+#define HYPERCALL_BUFFER_INIT_NO_BOUNCE .dir = 0, .sz = 0, .ubuf = (void *)-1
+
+/*
+ * Defines a hypercall buffer and user pointer with _name of _type.
+ *
+ * The user accesses the data as normal via _name which will be
+ * transparently converted to the hypercall buffer as necessary.
+ */
+#define DECLARE_HYPERCALL_BUFFER(_type, _name) \
+ _type *(_name) = NULL; \
+ xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
+ .hbuf = NULL, \
+ .param_shadow = NULL, \
+ HYPERCALL_BUFFER_INIT_NO_BOUNCE \
+ }
+
+/*
+ * Like DECLARE_HYPERCALL_BUFFER() but using an already allocated
+ * hypercall buffer, _hbuf.
+ *
+ * Useful when a hypercall buffer is passed to a function and access
+ * via the user pointer is required.
+ *
+ * See DECLARE_HYPERCALL_BUFFER_ARGUMENT() if the user pointer is not
+ * required.
+ */
+#define DECLARE_HYPERCALL_BUFFER_SHADOW(_type, _name, _hbuf) \
+ _type *(_name) = (_hbuf)->hbuf; \
+ __attribute__((unused)) \
+ xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
+ .hbuf = (void *)-1, \
+ .param_shadow = (_hbuf), \
+ HYPERCALL_BUFFER_INIT_NO_BOUNCE \
+ }
+
+/*
+ * Declare the necessary data structure to allow a hypercall buffer
+ * passed as an argument to a function to be used in the normal way.
+ */
+#define DECLARE_HYPERCALL_BUFFER_ARGUMENT(_name) \
+ xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
+ .hbuf = (void *)-1, \
+ .param_shadow = (_name), \
+ HYPERCALL_BUFFER_INIT_NO_BOUNCE \
+ }
+
+/*
+ * Get the hypercall buffer data pointer in a form suitable for use
+ * directly as a hypercall argument.
+ */
+#define HYPERCALL_BUFFER_AS_ARG(_name) \
+ ({ xc_hypercall_buffer_t _hcbuf_arg1; \
+ typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_arg2 = \
+ HYPERCALL_BUFFER(_name); \
+ (void)(&_hcbuf_arg1 == _hcbuf_arg2); \
+ (unsigned long)(_hcbuf_arg2)->hbuf; \
+ })
+
+/*
+ * Set a xen_guest_handle in a type safe manner, ensuring that the
+ * data pointer has been correctly allocated.
+ */
+#define set_xen_guest_handle_impl(_hnd, _val, _byte_off) \
+ do { \
+ xc_hypercall_buffer_t _hcbuf_hnd1; \
+ typeof(XC__HYPERCALL_BUFFER_NAME(_val)) *_hcbuf_hnd2 = \
+ HYPERCALL_BUFFER(_val); \
+ (void) (&_hcbuf_hnd1 == _hcbuf_hnd2); \
+ set_xen_guest_handle_raw(_hnd, \
+ (_hcbuf_hnd2)->hbuf + (_byte_off)); \
+ } while (0)
+
+#undef set_xen_guest_handle
+#define set_xen_guest_handle(_hnd, _val) \
+ set_xen_guest_handle_impl(_hnd, _val, 0)
+
+#define set_xen_guest_handle_offset(_hnd, _val, _off) \
+ set_xen_guest_handle_impl(_hnd, _val, \
+ ((sizeof(*_val)*(_off))))
+
+/* Use with set_xen_guest_handle in place of NULL */
+extern xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL);
+
+/*
+ * Allocate and free hypercall buffers with byte granularity.
+ */
+void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size);
+#define xc_hypercall_buffer_alloc(_xch, _name, _size) xc__hypercall_buffer_alloc(_xch, HYPERCALL_BUFFER(_name), _size)
+void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b);
+#define xc_hypercall_buffer_free(_xch, _name) xc__hypercall_buffer_free(_xch, HYPERCALL_BUFFER(_name))
+
+/*
+ * Allocate and free hypercall buffers with page alignment.
+ */
+void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
+#define xc_hypercall_buffer_alloc_pages(_xch, _name, _nr) xc__hypercall_buffer_alloc_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
+void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
+#define xc_hypercall_buffer_free_pages(_xch, _name, _nr) \
+ do { \
+ if ( _name ) \
+ xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name), \
+ _nr); \
+ } while (0)
+
+/*
+ * Array of hypercall buffers.
+ *
+ * Create an array with xc_hypercall_buffer_array_create() and
+ * populate it by declaring one hypercall buffer in a loop and
+ * allocating the buffer with xc_hypercall_buffer_array_alloc().
+ *
+ * To access a previously allocated buffers, declare a new hypercall
+ * buffer and call xc_hypercall_buffer_array_get().
+ *
+ * Destroy the array with xc_hypercall_buffer_array_destroy() to free
+ * the array and all its allocated hypercall buffers.
+ */
+struct xc_hypercall_buffer_array;
+typedef struct xc_hypercall_buffer_array xc_hypercall_buffer_array_t;
+
+xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch, unsigned n);
+void *xc__hypercall_buffer_array_alloc(xc_interface *xch, xc_hypercall_buffer_array_t *array,
+ unsigned index, xc_hypercall_buffer_t *hbuf, size_t size);
+#define xc_hypercall_buffer_array_alloc(_xch, _array, _index, _name, _size) \
+ xc__hypercall_buffer_array_alloc(_xch, _array, _index, HYPERCALL_BUFFER(_name), _size)
+void *xc__hypercall_buffer_array_get(xc_interface *xch, xc_hypercall_buffer_array_t *array,
+ unsigned index, xc_hypercall_buffer_t *hbuf);
+#define xc_hypercall_buffer_array_get(_xch, _array, _index, _name, _size) \
+ xc__hypercall_buffer_array_get(_xch, _array, _index, HYPERCALL_BUFFER(_name))
+void xc_hypercall_buffer_array_destroy(xc_interface *xc, xc_hypercall_buffer_array_t *array);
+
+/*
+ * CPUMAP handling
+ */
+typedef uint8_t *xc_cpumap_t;
+
+/* return maximum number of cpus the hypervisor supports */
+int xc_get_max_cpus(xc_interface *xch);
+
+/* return the number of online cpus */
+int xc_get_online_cpus(xc_interface *xch);
+
+/* return array size for cpumap */
+int xc_get_cpumap_size(xc_interface *xch);
+
+/* allocate a cpumap */
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
+
+/* clear an CPU from the cpumap. */
+void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map);
+
+/* set an CPU in the cpumap. */
+void xc_cpumap_setcpu(int cpu, xc_cpumap_t map);
+
+/* Test whether the CPU in cpumap is set. */
+int xc_cpumap_testcpu(int cpu, xc_cpumap_t map);
+
+/*
+ * NODEMAP handling
+ */
+typedef uint8_t *xc_nodemap_t;
+
+/* return maximum number of NUMA nodes the hypervisor supports */
+int xc_get_max_nodes(xc_interface *xch);
+
+/* return array size for nodemap */
+int xc_get_nodemap_size(xc_interface *xch);
+
+/* allocate a nodemap */
+xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);
+
+/*
+ * DOMAIN DEBUGGING FUNCTIONS
+ */
+
+typedef struct xc_core_header {
+ unsigned int xch_magic;
+ unsigned int xch_nr_vcpus;
+ unsigned int xch_nr_pages;
+ unsigned int xch_ctxt_offset;
+ unsigned int xch_index_offset;
+ unsigned int xch_pages_offset;
+} xc_core_header_t;
+
+#define XC_CORE_MAGIC 0xF00FEBED
+#define XC_CORE_MAGIC_HVM 0xF00FEBEE
+
+/*
+ * DOMAIN MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_dominfo {
+ uint32_t domid;
+ uint32_t ssidref;
+ unsigned int dying:1, crashed:1, shutdown:1,
+ paused:1, blocked:1, running:1,
+ hvm:1, debugged:1, xenstore:1, hap:1;
+ unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
+ unsigned long nr_pages; /* current number, not maximum */
+ unsigned long nr_outstanding_pages;
+ unsigned long nr_shared_pages;
+ unsigned long nr_paged_pages;
+ unsigned long shared_info_frame;
+ uint64_t cpu_time;
+ unsigned long max_memkb;
+ unsigned int nr_online_vcpus;
+ unsigned int max_vcpu_id;
+ xen_domain_handle_t handle;
+ unsigned int cpupool;
+ struct xen_arch_domainconfig arch_config;
+} xc_dominfo_t;
+
+typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
+
+typedef union
+{
+#if defined(__i386__) || defined(__x86_64__)
+ vcpu_guest_context_x86_64_t x64;
+ vcpu_guest_context_x86_32_t x32;
+#endif
+ vcpu_guest_context_t c;
+} vcpu_guest_context_any_t;
+
+typedef union
+{
+#if defined(__i386__) || defined(__x86_64__)
+ shared_info_x86_64_t x64;
+ shared_info_x86_32_t x32;
+#endif
+ shared_info_t s;
+} shared_info_any_t;
+
+#if defined(__i386__) || defined(__x86_64__)
+typedef union
+{
+ start_info_x86_64_t x64;
+ start_info_x86_32_t x32;
+ start_info_t s;
+} start_info_any_t;
+#endif
+
+typedef struct xc_vcpu_extstate {
+ uint64_t xfeature_mask;
+ uint64_t size;
+ void *buffer;
+} xc_vcpu_extstate_t;
+
+int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
+ struct xen_domctl_createdomain *config);
+
+
+/* Functions to produce a dump of a given domain
+ * xc_domain_dumpcore - produces a dump to a specified file
+ * xc_domain_dumpcore_via_callback - produces a dump, using a specified
+ * callback function
+ */
+int xc_domain_dumpcore(xc_interface *xch,
+ uint32_t domid,
+ const char *corename);
+
+/* Define the callback function type for xc_domain_dumpcore_via_callback.
+ *
+ * This function is called by the coredump code for every "write",
+ * and passes an opaque object for the use of the function and
+ * created by the caller of xc_domain_dumpcore_via_callback.
+ */
+typedef int (dumpcore_rtn_t)(xc_interface *xch,
+ void *arg, char *buffer, unsigned int length);
+
+int xc_domain_dumpcore_via_callback(xc_interface *xch,
+ uint32_t domid,
+ void *arg,
+ dumpcore_rtn_t dump_rtn);
+
+/*
+ * This function sets the maximum number of vcpus that a domain may create.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id in which vcpus are to be created.
+ * @parm max the maximum number of vcpus that the domain may create.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_max_vcpus(xc_interface *xch,
+ uint32_t domid,
+ unsigned int max);
+
+/**
+ * This function pauses a domain. A paused domain still exists in memory
+ * however it does not receive any timeslices from the hypervisor.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to pause
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_pause(xc_interface *xch,
+ uint32_t domid);
+/**
+ * This function unpauses a domain. The domain should have been previously
+ * paused.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to unpause
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_unpause(xc_interface *xch,
+ uint32_t domid);
+
+/**
+ * This function will destroy a domain. Destroying a domain removes the domain
+ * completely from memory. This function should be called after sending the
+ * domain a SHUTDOWN control message to free up the domain resources.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to destroy
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_destroy(xc_interface *xch,
+ uint32_t domid);
+
+
+/**
+ * This function resumes a suspended domain. The domain should have
+ * been previously suspended.
+ *
+ * Note that there is no 'xc_domain_suspend' as suspending a domain
+ * is quite the endeavour.
+ *
+ * For the purpose of this explanation there are three guests:
+ * PV (using hypercalls for privileged operations), HVM
+ * (fully hardware virtualized guests using emulated devices for everything),
+ * and PVHVM (PV aware with hardware virtualisation).
+ *
+ * HVM guests are the simplest - they suspend via S3 / S4 and resume from
+ * S3 / S4. Upon resume they have to re-negotiate with the emulated devices.
+ *
+ * PV and PVHVM communicate via hypercalls for suspend (and resume).
+ * For suspend the toolstack initiates the process by writing a value
+ * in XenBus "control/shutdown" with the string "suspend".
+ *
+ * The PV guest stashes anything it deems necessary in 'struct
+ * start_info' in case of failure (PVHVM may ignore this) and calls
+ * the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall (for PV as
+ * argument it passes the MFN to 'struct start_info').
+ *
+ * And then the guest is suspended.
+ *
+ * The checkpointing or notifying a guest that the suspend failed or
+ * cancelled (in case of checkpoint) is by having the
+ * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return a non-zero
+ * value.
+ *
+ * The PV and PVHVM resume path are similar. For PV it would be
+ * similar to bootup - figure out where the 'struct start_info' is (or
+ * if the suspend was cancelled aka checkpointed - reuse the saved
+ * values).
+ *
+ * From here on they differ depending whether the guest is PV or PVHVM
+ * in specifics but follow overall the same path:
+ * - PV: Bringing up the vCPUS,
+ * - PVHVM: Setup vector callback,
+ * - Bring up vCPU runstates,
+ * - Remap the grant tables if checkpointing or setup from scratch,
+ *
+ *
+ * If the resume was not checkpointing (or if suspend was successful) we would
+ * setup the PV timers and the different PV events. Lastly the PV drivers
+ * re-negotiate with the backend.
+ *
+ * This function would return before the guest started resuming. That is
+ * the guest would be in non-running state and its vCPU context would be
+ * in the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return path
+ * (for PV and PVHVM). For HVM it would be in the QEMU emulated
+ * BIOS handling S3 suspend.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to resume
+ * @parm fast use cooperative resume (guest must support this)
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_resume(xc_interface *xch,
+ uint32_t domid,
+ int fast);
+
+/**
+ * This function will shutdown a domain. This is intended for use in
+ * fully-virtualized domains where this operation is analogous to the
+ * sched_op operations in a paravirtualized domain. The caller is
+ * expected to give the reason for the shutdown.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to destroy
+ * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_shutdown(xc_interface *xch,
+ uint32_t domid,
+ int reason);
+
+int xc_watchdog(xc_interface *xch,
+ uint32_t id,
+ uint32_t timeout);
+
+/**
+ * This function explicitly sets the host NUMA nodes the domain will
+ * have affinity with.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id one wants to set the affinity of.
+ * @parm nodemap the map of the affine nodes.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_node_setaffinity(xc_interface *xch,
+                               uint32_t domid,
+                               xc_nodemap_t nodemap);
+
+/**
+ * This function retrieves the host NUMA nodes the domain has
+ * affinity with.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id one wants to get the node affinity of.
+ * @parm nodemap the map of the affine nodes.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_node_getaffinity(xc_interface *xch,
+                               uint32_t domid,
+                               xc_nodemap_t nodemap);
+
+/**
+ * This function specifies the CPU affinity for a vcpu.
+ *
+ * There are two kinds of affinity. Soft affinity is on what CPUs a vcpu
+ * prefers to run. Hard affinity is on what CPUs a vcpu is allowed to run.
+ * If flags contains XEN_VCPUAFFINITY_SOFT, the soft affinity is set to
+ * what cpumap_soft_inout contains. If flags contains XEN_VCPUAFFINITY_HARD,
+ * the hard affinity is set to what cpumap_hard_inout contains. Both flags
+ * can be set at the same time, in which case both soft and hard affinity are
+ * set to what the respective parameter contains.
+ *
+ * The function also returns the effective hard or/and soft affinity, still
+ * via the cpumap_soft_inout and cpumap_hard_inout parameters. Effective
+ * affinity is, in case of soft affinity, the intersection of soft affinity,
+ * hard affinity and the cpupool's online CPUs for the domain, and is returned
+ * in cpumap_soft_inout, if XEN_VCPUAFFINITY_SOFT is set in flags. In case of
+ * hard affinity, it is the intersection between hard affinity and the
+ * cpupool's online CPUs, and is returned in cpumap_hard_inout, if
+ * XEN_VCPUAFFINITY_HARD is set in flags. If both flags are set, both soft
+ * and hard affinity are returned in the respective parameter.
+ *
+ * We do report it back as effective affinity is what the Xen scheduler will
+ * actually use, and we thus allow checking whether or not that matches with,
+ * or at least is good enough for, the caller's purposes.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap_hard_inout specifies(/returns) the (effective) hard affinity
+ * @param cpumap_soft_inout specifies(/returns) the (effective) soft affinity
+ * @param flags what we want to set
+ */
+int xc_vcpu_setaffinity(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap_hard_inout,
+ xc_cpumap_t cpumap_soft_inout,
+ uint32_t flags);
+
+/**
+ * This function retrieves hard and soft CPU affinity of a vcpu,
+ * depending on what flags are set.
+ *
+ * Soft affinity is returned in cpumap_soft if XEN_VCPUAFFINITY_SOFT is set.
+ * Hard affinity is returned in cpumap_hard if XEN_VCPUAFFINITY_HARD is set.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap_hard is where hard affinity is returned
+ * @param cpumap_soft is where soft affinity is returned
+ * @param flags what we want get
+ */
+int xc_vcpu_getaffinity(xc_interface *xch,
+ uint32_t domid,
+ int vcpu,
+ xc_cpumap_t cpumap_hard,
+ xc_cpumap_t cpumap_soft,
+ uint32_t flags);
+
+
+/**
+ * This function will return the guest_width (in bytes) for the
+ * specified domain.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the domain id one wants the guest width of.
+ * @param guest_width the returned guest width (address size, in bytes).
+ */
+int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
+ unsigned int *guest_width);
+
+
+/**
+ * This function will return information about one or more domains. It is
+ * designed to iterate over the list of domains. If a single domain is
+ * requested, this function will return the next domain in the list - if
+ * one exists. It is, therefore, important in this case to make sure the
+ * domain requested was the one returned.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm first_domid the first domain to enumerate information from. Domains
+ *  are currently enumerated in order of creation.
+ * @parm max_doms the number of elements in info
+ * @parm info an array of max_doms size that will contain the information for
+ * the enumerated domains.
+ * @return the number of domains enumerated or -1 on error
+ */
+int xc_domain_getinfo(xc_interface *xch,
+ uint32_t first_domid,
+ unsigned int max_doms,
+ xc_dominfo_t *info);
+
+
+/**
+ * This function will set the execution context for the specified vcpu.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to set the vcpu context for
+ * @parm vcpu the vcpu number for the context
+ * @parm ctxt pointer to the cpu context with the values to set
+ * @return the number of domains enumerated or -1 on error
+ */
+int xc_vcpu_setcontext(xc_interface *xch,
+ uint32_t domid,
+ uint32_t vcpu,
+ vcpu_guest_context_any_t *ctxt);
+/**
+ * This function will return information about one or more domains, using a
+ * single hypercall. The domain information will be stored into the supplied
+ * array of xc_domaininfo_t structures.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm first_domain the first domain to enumerate information from.
+ *  Domains are currently enumerated in order of creation.
+ * @parm max_domains the number of elements in info
+ * @parm info an array of max_doms size that will contain the information for
+ * the enumerated domains.
+ * @return the number of domains enumerated or -1 on error
+ */
+int xc_domain_getinfolist(xc_interface *xch,
+ uint32_t first_domain,
+ unsigned int max_domains,
+ xc_domaininfo_t *info);
+
+/**
+ * This function sets the p2m entry for a broken page
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id which broken page belong to
+ * @parm pfn the pfn number of the broken page
+ * @return 0 on success, -1 on failure
+ */
+int xc_set_broken_page_p2m(xc_interface *xch,
+ uint32_t domid,
+ unsigned long pfn);
+
+/**
+ * This function returns information about the context of a hvm domain
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm ctxt_buf a pointer to a structure to store the execution context of
+ * the hvm domain
+ * @parm size the size of ctxt_buf in bytes
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_getcontext(xc_interface *xch,
+ uint32_t domid,
+ uint8_t *ctxt_buf,
+ uint32_t size);
+
+
+/**
+ * This function returns one element of the context of a hvm domain
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm typecode which type of element required
+ * @parm instance which instance of the type
+ * @parm ctxt_buf a pointer to a structure to store the execution context of
+ * the hvm domain
+ * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_getcontext_partial(xc_interface *xch,
+ uint32_t domid,
+ uint16_t typecode,
+ uint16_t instance,
+ void *ctxt_buf,
+ uint32_t size);
+
+/**
+ * This function will set the context for hvm domain
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to set the hvm domain context for
+ * @parm hvm_ctxt pointer to the hvm context with the values to set
+ * @parm size the size of hvm_ctxt in bytes
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_setcontext(xc_interface *xch,
+ uint32_t domid,
+ uint8_t *hvm_ctxt,
+ uint32_t size);
+
+/**
+ * This function will return guest IO ABI protocol
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get IO ABI protocol for
+ * @return guest protocol on success, NULL on failure
+ */
+const char *xc_domain_get_native_protocol(xc_interface *xch,
+ uint32_t domid);
+
+/**
+ * This function returns information about the execution context of a
+ * particular vcpu of a domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm vcpu the vcpu number
+ * @parm ctxt a pointer to a structure to store the execution context of the
+ * domain
+ * @return 0 on success, -1 on failure
+ */
+int xc_vcpu_getcontext(xc_interface *xch,
+ uint32_t domid,
+ uint32_t vcpu,
+ vcpu_guest_context_any_t *ctxt);
+
+/**
+ * This function initializes the vuart emulation and returns
+ * the event to be used by the backend for communicating with
+ * the emulation code.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm type type of vuart
+ * @parm domid the domain to get information from
+ * @parm console_domid the domid of the backend console
+ * @parm gfn the guest pfn to be used as the ring buffer
+ * @parm evtchn the event channel to be used for events
+ * @return 0 on success, negative error on failure
+ */
+int xc_dom_vuart_init(xc_interface *xch,
+ uint32_t type,
+ uint32_t domid,
+ uint32_t console_domid,
+ xen_pfn_t gfn,
+ evtchn_port_t *evtchn);
+
+/**
+ * This function returns information about the XSAVE state of a particular
+ * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
+ * the call is considered a query to retrieve them and the buffer is not
+ * filled.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm vcpu the vcpu number
+ * @parm extstate a pointer to a structure to store the XSAVE state of the
+ * domain
+ * @return 0 on success, negative error code on failure
+ */
+int xc_vcpu_get_extstate(xc_interface *xch,
+ uint32_t domid,
+ uint32_t vcpu,
+ xc_vcpu_extstate_t *extstate);
+
+typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
+int xc_vcpu_getinfo(xc_interface *xch,
+ uint32_t domid,
+ uint32_t vcpu,
+ xc_vcpuinfo_t *info);
+
+long long xc_domain_get_cpu_usage(xc_interface *xch,
+ uint32_t domid,
+ int vcpu);
+
+int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
+ xen_domain_handle_t handle);
+
+typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
+int xc_shadow_control(xc_interface *xch,
+ uint32_t domid,
+ unsigned int sop,
+ xc_hypercall_buffer_t *dirty_bitmap,
+ unsigned long pages,
+ unsigned long *mb,
+ uint32_t mode,
+ xc_shadow_op_stats_t *stats);
+
+int xc_sched_credit_domain_set(xc_interface *xch,
+ uint32_t domid,
+ struct xen_domctl_sched_credit *sdom);
+
+int xc_sched_credit_domain_get(xc_interface *xch,
+ uint32_t domid,
+ struct xen_domctl_sched_credit *sdom);
+int xc_sched_credit_params_set(xc_interface *xch,
+ uint32_t cpupool_id,
+ struct xen_sysctl_credit_schedule *schedule);
+int xc_sched_credit_params_get(xc_interface *xch,
+ uint32_t cpupool_id,
+ struct xen_sysctl_credit_schedule *schedule);
+
+int xc_sched_credit2_params_set(xc_interface *xch,
+ uint32_t cpupool_id,
+ struct xen_sysctl_credit2_schedule *schedule);
+int xc_sched_credit2_params_get(xc_interface *xch,
+ uint32_t cpupool_id,
+ struct xen_sysctl_credit2_schedule *schedule);
+int xc_sched_credit2_domain_set(xc_interface *xch,
+ uint32_t domid,
+ struct xen_domctl_sched_credit2 *sdom);
+int xc_sched_credit2_domain_get(xc_interface *xch,
+ uint32_t domid,
+ struct xen_domctl_sched_credit2 *sdom);
+
+int xc_sched_rtds_domain_set(xc_interface *xch,
+ uint32_t domid,
+ struct xen_domctl_sched_rtds *sdom);
+int xc_sched_rtds_domain_get(xc_interface *xch,
+ uint32_t domid,
+ struct xen_domctl_sched_rtds *sdom);
+int xc_sched_rtds_vcpu_set(xc_interface *xch,
+ uint32_t domid,
+ struct xen_domctl_schedparam_vcpu *vcpus,
+ uint32_t num_vcpus);
+int xc_sched_rtds_vcpu_get(xc_interface *xch,
+ uint32_t domid,
+ struct xen_domctl_schedparam_vcpu *vcpus,
+ uint32_t num_vcpus);
+
+int
+xc_sched_arinc653_schedule_set(
+ xc_interface *xch,
+ uint32_t cpupool_id,
+ struct xen_sysctl_arinc653_schedule *schedule);
+
+int
+xc_sched_arinc653_schedule_get(
+ xc_interface *xch,
+ uint32_t cpupool_id,
+ struct xen_sysctl_arinc653_schedule *schedule);
+
+/**
+ * This function sends a trigger to a domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to send trigger
+ * @parm trigger the trigger type
+ * @parm vcpu the vcpu number to send trigger
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_send_trigger(xc_interface *xch,
+ uint32_t domid,
+ uint32_t trigger,
+ uint32_t vcpu);
+
+/**
+ * This function enables or disable debugging of a domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to send trigger
+ * @parm enable true to enable debugging
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_setdebugging(xc_interface *xch,
+ uint32_t domid,
+ unsigned int enable);
+
+/**
+ * This function audits the (top level) p2m of a domain
+ * and returns the different error counts, if any.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id whose top level p2m we
+ * want to audit
+ * @parm orphans count of m2p entries for valid
+ * domain pages containing an invalid value
+ * @parm m2p_bad count of m2p entries mismatching the
+ * associated p2m entry for this domain
+ * @parm p2m_bad count of p2m entries for this domain
+ * mismatching the associated m2p entry
+ * return 0 on success, -1 on failure
+ * errno values on failure include:
+ * -ENOSYS: not implemented
+ * -EFAULT: could not copy results back to guest
+ */
+int xc_domain_p2m_audit(xc_interface *xch,
+ uint32_t domid,
+ uint64_t *orphans,
+ uint64_t *m2p_bad,
+ uint64_t *p2m_bad);
+
+/**
+ * This function sets or clears the requirement that an access memory
+ * event listener is required on the domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id on which a listener is (not) required
+ * @parm enable true to require a listener
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_set_access_required(xc_interface *xch,
+ uint32_t domid,
+ unsigned int required);
+/**
+ * This function sets the handler of global VIRQs sent by the hypervisor
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id which will handle the VIRQ
+ * @parm virq the virq number (VIRQ_*)
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
+
+/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+ uint32_t cpupool_id;
+ uint32_t sched_id;
+ uint32_t n_dom;
+ xc_cpumap_t cpumap;
+} xc_cpupoolinfo_t;
+
+#define XC_CPUPOOL_POOLID_ANY 0xFFFFFFFF
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(xc_interface *xch,
+ uint32_t *ppoolid,
+ uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(xc_interface *xch,
+ uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (to be freed via xc_cpupool_infofree)
+ */
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid);
+
+/**
+ * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm info area to free
+ */
+void xc_cpupool_infofree(xc_interface *xch,
+ xc_cpupoolinfo_t *info);
+
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(xc_interface *xch,
+ uint32_t poolid,
+ int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(xc_interface *xch,
+ uint32_t poolid,
+ int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(xc_interface *xch,
+ uint32_t poolid,
+ uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * return cpumap array on success, NULL else
+ */
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
+
+/*
+ * EVENT CHANNEL FUNCTIONS
+ *
+ * None of these do any logging.
+ */
+
+/* A port identifier is guaranteed to fit in 31 bits. */
+typedef int xc_evtchn_port_or_error_t;
+
+/**
+ * This function allocates an unbound port. Ports are named endpoints used for
+ * interdomain communication. This function is most useful in opening a
+ * well-known port within a domain to receive events on.
+ *
+ * NOTE: If you are allocating a *local* unbound port, you probably want to
+ * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
+ * ports *only* during domain creation.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm dom the ID of the local domain (the 'allocatee')
+ * @parm remote_dom the ID of the domain who will later bind
+ * @return allocated port (in @dom) on success, -1 on failure
+ */
+xc_evtchn_port_or_error_t
+xc_evtchn_alloc_unbound(xc_interface *xch,
+ uint32_t dom,
+ uint32_t remote_dom);
+
+int xc_evtchn_reset(xc_interface *xch,
+ uint32_t dom);
+
+typedef struct evtchn_status xc_evtchn_status_t;
+int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status);
+
+
+
+int xc_physdev_pci_access_modify(xc_interface *xch,
+ uint32_t domid,
+ int bus,
+ int dev,
+ int func,
+ int enable);
+
+int xc_readconsolering(xc_interface *xch,
+ char *buffer,
+ unsigned int *pnr_chars,
+ int clear, int incremental, uint32_t *pindex);
+
+int xc_send_debug_keys(xc_interface *xch, const char *keys);
+
+typedef struct xen_sysctl_physinfo xc_physinfo_t;
+typedef struct xen_sysctl_cputopo xc_cputopo_t;
+typedef struct xen_sysctl_numainfo xc_numainfo_t;
+typedef struct xen_sysctl_meminfo xc_meminfo_t;
+typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
+
+typedef uint32_t xc_cpu_to_node_t;
+typedef uint32_t xc_cpu_to_socket_t;
+typedef uint32_t xc_cpu_to_core_t;
+typedef uint64_t xc_node_to_memsize_t;
+typedef uint64_t xc_node_to_memfree_t;
+typedef uint32_t xc_node_to_node_dist_t;
+
+int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
+int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
+ xc_cputopo_t *cputopo);
+int xc_microcode_update(xc_interface *xch, const void *buf, size_t len);
+int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
+ xc_meminfo_t *meminfo, uint32_t *distance);
+int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
+ physdev_pci_device_t *devs, uint32_t *nodes);
+
+int xc_sched_id(xc_interface *xch,
+ int *sched_id);
+
+int xc_machphys_mfn_list(xc_interface *xch,
+ unsigned long max_extents,
+ xen_pfn_t *extent_start);
+
+typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
+int xc_getcpuinfo(xc_interface *xch, int max_cpus,
+ xc_cpuinfo_t *info, int *nr_cpus);
+
+int xc_domain_setmaxmem(xc_interface *xch,
+ uint32_t domid,
+ uint64_t max_memkb);
+
+int xc_domain_set_memmap_limit(xc_interface *xch,
+ uint32_t domid,
+ unsigned long map_limitkb);
+
+int xc_domain_setvnuma(xc_interface *xch,
+ uint32_t domid,
+ uint32_t nr_vnodes,
+ uint32_t nr_regions,
+ uint32_t nr_vcpus,
+ xen_vmemrange_t *vmemrange,
+ unsigned int *vdistance,
+ unsigned int *vcpu_to_vnode,
+ unsigned int *vnode_to_pnode);
+/*
+ * Retrieve vnuma configuration
+ * domid: IN, target domid
+ * nr_vnodes: IN/OUT, number of vnodes, not NULL
+ * nr_vmemranges: IN/OUT, number of vmemranges, not NULL
+ * nr_vcpus: IN/OUT, number of vcpus, not NULL
+ * vmemranges: OUT, an array which has length of nr_vmemranges
+ * vdistance: OUT, an array which has length of nr_vnodes * nr_vnodes
+ * vcpu_to_vnode: OUT, an array which has length of nr_vcpus
+ */
+int xc_domain_getvnuma(xc_interface *xch,
+ uint32_t domid,
+ uint32_t *nr_vnodes,
+ uint32_t *nr_vmemranges,
+ uint32_t *nr_vcpus,
+ xen_vmemrange_t *vmemrange,
+ unsigned int *vdistance,
+ unsigned int *vcpu_to_vnode);
+
+int xc_domain_soft_reset(xc_interface *xch,
+ uint32_t domid);
+
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * PC BIOS standard E820 types and structure.
+ */
+#define E820_RAM 1
+#define E820_RESERVED 2
+#define E820_ACPI 3
+#define E820_NVS 4
+#define E820_UNUSABLE 5
+
+#define E820MAX (128)
+
+struct e820entry {
+ uint64_t addr;
+ uint64_t size;
+ uint32_t type;
+} __attribute__((packed));
+int xc_domain_set_memory_map(xc_interface *xch,
+ uint32_t domid,
+ struct e820entry entries[],
+ uint32_t nr_entries);
+
+int xc_get_machine_memory_map(xc_interface *xch,
+ struct e820entry entries[],
+ uint32_t max_entries);
+#endif
+
+int xc_reserved_device_memory_map(xc_interface *xch,
+ uint32_t flags,
+ uint16_t seg,
+ uint8_t bus,
+ uint8_t devfn,
+ struct xen_reserved_device_memory entries[],
+ uint32_t *max_entries);
+int xc_domain_set_time_offset(xc_interface *xch,
+ uint32_t domid,
+ int32_t time_offset_seconds);
+
+int xc_domain_set_tsc_info(xc_interface *xch,
+ uint32_t domid,
+ uint32_t tsc_mode,
+ uint64_t elapsed_nsec,
+ uint32_t gtsc_khz,
+ uint32_t incarnation);
+
+int xc_domain_get_tsc_info(xc_interface *xch,
+ uint32_t domid,
+ uint32_t *tsc_mode,
+ uint64_t *elapsed_nsec,
+ uint32_t *gtsc_khz,
+ uint32_t *incarnation);
+
+int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
+
+int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
+
+int xc_domain_increase_reservation(xc_interface *xch,
+ uint32_t domid,
+ unsigned long nr_extents,
+ unsigned int extent_order,
+ unsigned int mem_flags,
+ xen_pfn_t *extent_start);
+
+int xc_domain_increase_reservation_exact(xc_interface *xch,
+ uint32_t domid,
+ unsigned long nr_extents,
+ unsigned int extent_order,
+ unsigned int mem_flags,
+ xen_pfn_t *extent_start);
+
+int xc_domain_decrease_reservation(xc_interface *xch,
+ uint32_t domid,
+ unsigned long nr_extents,
+ unsigned int extent_order,
+ xen_pfn_t *extent_start);
+
+int xc_domain_decrease_reservation_exact(xc_interface *xch,
+ uint32_t domid,
+ unsigned long nr_extents,
+ unsigned int extent_order,
+ xen_pfn_t *extent_start);
+
+int xc_domain_add_to_physmap(xc_interface *xch,
+ uint32_t domid,
+ unsigned int space,
+ unsigned long idx,
+ xen_pfn_t gpfn);
+
+int xc_domain_add_to_physmap_batch(xc_interface *xch,
+ uint32_t domid,
+ uint32_t foreign_domid,
+ unsigned int space,
+ unsigned int size,
+ xen_ulong_t *idxs,
+ xen_pfn_t *gfpns,
+ int *errs);
+
+int xc_domain_remove_from_physmap(xc_interface *xch,
+ uint32_t domid,
+ xen_pfn_t gpfn);
+
+int xc_domain_populate_physmap(xc_interface *xch,
+ uint32_t domid,
+ unsigned long nr_extents,
+ unsigned int extent_order,
+ unsigned int mem_flags,
+ xen_pfn_t *extent_start);
+
+int xc_domain_populate_physmap_exact(xc_interface *xch,
+ uint32_t domid,
+ unsigned long nr_extents,
+ unsigned int extent_order,
+ unsigned int mem_flags,
+ xen_pfn_t *extent_start);
+
+int xc_domain_claim_pages(xc_interface *xch,
+ uint32_t domid,
+ unsigned long nr_pages);
+
+int xc_domain_memory_exchange_pages(xc_interface *xch,
+ uint32_t domid,
+ unsigned long nr_in_extents,
+ unsigned int in_order,
+ xen_pfn_t *in_extents,
+ unsigned long nr_out_extents,
+ unsigned int out_order,
+ xen_pfn_t *out_extents);
+
+int xc_domain_set_pod_target(xc_interface *xch,
+ uint32_t domid,
+ uint64_t target_pages,
+ uint64_t *tot_pages,
+ uint64_t *pod_cache_pages,
+ uint64_t *pod_entries);
+
+int xc_domain_get_pod_target(xc_interface *xch,
+ uint32_t domid,
+ uint64_t *tot_pages,
+ uint64_t *pod_cache_pages,
+ uint64_t *pod_entries);
+
+int xc_domain_ioport_permission(xc_interface *xch,
+ uint32_t domid,
+ uint32_t first_port,
+ uint32_t nr_ports,
+ uint32_t allow_access);
+
+int xc_domain_irq_permission(xc_interface *xch,
+ uint32_t domid,
+ uint8_t pirq,
+ uint8_t allow_access);
+
+int xc_domain_iomem_permission(xc_interface *xch,
+ uint32_t domid,
+ unsigned long first_mfn,
+ unsigned long nr_mfns,
+ uint8_t allow_access);
+
+unsigned long xc_make_page_below_4G(xc_interface *xch, uint32_t domid,
+ unsigned long mfn);
+
+typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
+typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
+int xc_perfc_reset(xc_interface *xch);
+int xc_perfc_query_number(xc_interface *xch,
+ int *nbr_desc,
+ int *nbr_val);
+int xc_perfc_query(xc_interface *xch,
+ xc_hypercall_buffer_t *desc,
+ xc_hypercall_buffer_t *val);
+
+typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
+int xc_lockprof_reset(xc_interface *xch);
+int xc_lockprof_query_number(xc_interface *xch,
+ uint32_t *n_elems);
+int xc_lockprof_query(xc_interface *xch,
+ uint32_t *n_elems,
+ uint64_t *time,
+ xc_hypercall_buffer_t *data);
+
+void *xc_memalign(xc_interface *xch, size_t alignment, size_t size);
+
+/**
+ * Avoid using this function, as it does not work for all cases (such
+ * as 4M superpages, or guests using PSE36). Only used for debugging.
+ *
+ * Translates a virtual address in the context of a given domain and
+ * vcpu returning the GFN containing the address (that is, an MFN for
+ * PV guests, a PFN for HVM guests). Returns 0 for failure.
+ *
+ * @parm xch a handle on an open hypervisor interface
+ * @parm dom the domain to perform the translation in
+ * @parm vcpu the vcpu to perform the translation on
+ * @parm virt the virtual address to translate
+ */
+unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
+ int vcpu, unsigned long long virt);
+
+
+int xc_copy_to_domain_page(xc_interface *xch, uint32_t domid,
+ unsigned long dst_pfn, const char *src_page);
+
+int xc_clear_domain_pages(xc_interface *xch, uint32_t domid,
+ unsigned long dst_pfn, int num);
+
+static inline int xc_clear_domain_page(xc_interface *xch, uint32_t domid,
+ unsigned long dst_pfn)
+{
+ return xc_clear_domain_pages(xch, domid, dst_pfn, 1);
+}
+
+int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
+ uint32_t dom);
+
+/* System wide memory properties */
+int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn);
+
+/* Get current total pages allocated to a domain. */
+long xc_get_tot_pages(xc_interface *xch, uint32_t domid);
+
+/**
+ * This function retrieves the number of bytes available
+ * in the heap in a specific range of address-widths and nodes.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to query
+ * @parm min_width the smallest address width to query (0 if don't care)
+ * @parm max_width the largest address width to query (0 if don't care)
+ * @parm node the node to query (-1 for all)
+ * @parm *bytes caller variable to put total bytes counted
+ * @return 0 on success, <0 on failure.
+ */
+int xc_availheap(xc_interface *xch, int min_width, int max_width, int node,
+ uint64_t *bytes);
+
+/*
+ * Trace Buffer Operations
+ */
+
+/**
+ * xc_tbuf_enable - enable tracing buffers
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm cnt size of tracing buffers to create (in pages)
+ * @parm mfn location to store mfn of the trace buffers to
+ * @parm size location to store the size (in bytes) of a trace buffer to
+ *
+ * Gets the machine address of the trace pointer area and the size of the
+ * per CPU buffers.
+ */
+int xc_tbuf_enable(xc_interface *xch, unsigned long pages,
+ unsigned long *mfn, unsigned long *size);
+
+/*
+ * Disable tracing buffers.
+ */
+int xc_tbuf_disable(xc_interface *xch);
+
+/**
+ * This function sets the size of the trace buffers. Setting the size
+ * is currently a one-shot operation that may be performed either at boot
+ * time or via this interface, not both. The buffer size must be set before
+ * enabling tracing.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm size the size in pages per cpu for the trace buffers
+ * @return 0 on success, -1 on failure.
+ */
+int xc_tbuf_set_size(xc_interface *xch, unsigned long size);
+
+/**
+ * This function retrieves the current size of the trace buffers.
+ * Note that the size returned is in terms of bytes, not pages.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm size will contain the size in bytes for the trace buffers
+ * @return 0 on success, -1 on failure.
+ */
+int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);
+
+int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask);
+
+int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);
+
+int xc_domctl(xc_interface *xch, struct xen_domctl *domctl);
+int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl);
+
+int xc_version(xc_interface *xch, int cmd, void *arg);
+
+int xc_flask_op(xc_interface *xch, xen_flask_op_t *op);
+
+/*
+ * Subscribe to domain suspend via evtchn.
+ * Returns -1 on failure, in which case errno will be set appropriately.
+ * Just calls XEN_DOMCTL_subscribe - see the caveats for that domctl
+ * (in its doc comment in domctl.h).
+ */
+int xc_domain_subscribe_for_suspend(
+ xc_interface *xch, uint32_t domid, evtchn_port_t port);
+
+/**************************
+ * GRANT TABLE OPERATIONS *
+ **************************/
+
+/*
+ * These functions sometimes log messages as above, but not always.
+ */
+
+
+int xc_gnttab_op(xc_interface *xch, int cmd,
+ void * op, int op_size, int count);
+/* Logs iff hypercall bounce fails, otherwise doesn't. */
+
+int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query);
+int xc_gnttab_get_version(xc_interface *xch, uint32_t domid); /* Never logs */
+grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid, int *gnt_num);
+grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid, int *gnt_num);
+/* Sometimes these don't set errno [fixme], and sometimes they don't log. */
+
+int xc_physdev_map_pirq(xc_interface *xch,
+ uint32_t domid,
+ int index,
+ int *pirq);
+
+int xc_physdev_map_pirq_msi(xc_interface *xch,
+ uint32_t domid,
+ int index,
+ int *pirq,
+ int devfn,
+ int bus,
+ int entry_nr,
+ uint64_t table_base);
+
+int xc_physdev_unmap_pirq(xc_interface *xch,
+ uint32_t domid,
+ int pirq);
+
+/*
+ * LOGGING AND ERROR REPORTING
+ */
+
+
+#define XC_MAX_ERROR_MSG_LEN 1024
+typedef struct xc_error {
+ enum xc_error_code code;
+ char message[XC_MAX_ERROR_MSG_LEN];
+} xc_error;
+
+
+/*
+ * Convert an error code or level into a text description. Return values
+ * are pointers to fixed strings and do not need to be freed.
+ * Do not fail, but return pointers to generic strings if fed bogus input.
+ */
+const char *xc_error_code_to_desc(int code);
+
+/*
+ * Convert an errno value to a text description.
+ */
+const char *xc_strerror(xc_interface *xch, int errcode);
+
+
+/*
+ * Return a pointer to the last error with level XC_REPORT_ERROR. This
+ * pointer and the data pointed to are only valid until the next call
+ * to libxc in the same thread.
+ */
+const xc_error *xc_get_last_error(xc_interface *handle);
+
+/*
+ * Clear the last error
+ */
+void xc_clear_last_error(xc_interface *xch);
+
+int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value);
+int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value);
+
+/* Deprecated: use xc_hvm_param_set/get() instead. */
+int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value);
+int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value);
+
+/* HVM guest pass-through */
+int xc_assign_device(xc_interface *xch,
+ uint32_t domid,
+ uint32_t machine_sbdf,
+ uint32_t flag);
+
+int xc_get_device_group(xc_interface *xch,
+ uint32_t domid,
+ uint32_t machine_sbdf,
+ uint32_t max_sdevs,
+ uint32_t *num_sdevs,
+ uint32_t *sdev_array);
+
+int xc_test_assign_device(xc_interface *xch,
+ uint32_t domid,
+ uint32_t machine_sbdf);
+
+int xc_deassign_device(xc_interface *xch,
+ uint32_t domid,
+ uint32_t machine_sbdf);
+
+int xc_assign_dt_device(xc_interface *xch,
+ uint32_t domid,
+ char *path);
+int xc_test_assign_dt_device(xc_interface *xch,
+ uint32_t domid,
+ char *path);
+int xc_deassign_dt_device(xc_interface *xch,
+ uint32_t domid,
+ char *path);
+
+int xc_domain_memory_mapping(xc_interface *xch,
+ uint32_t domid,
+ unsigned long first_gfn,
+ unsigned long first_mfn,
+ unsigned long nr_mfns,
+ uint32_t add_mapping);
+
+int xc_domain_ioport_mapping(xc_interface *xch,
+ uint32_t domid,
+ uint32_t first_gport,
+ uint32_t first_mport,
+ uint32_t nr_ports,
+ uint32_t add_mapping);
+
+int xc_domain_update_msi_irq(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t gvec,
+ uint32_t pirq,
+ uint32_t gflags,
+ uint64_t gtable);
+
+int xc_domain_unbind_msi_irq(xc_interface *xch,
+ uint32_t domid,
+ uint32_t gvec,
+ uint32_t pirq,
+ uint32_t gflags);
+
+int xc_domain_bind_pt_irq(xc_interface *xch,
+ uint32_t domid,
+ uint8_t machine_irq,
+ uint8_t irq_type,
+ uint8_t bus,
+ uint8_t device,
+ uint8_t intx,
+ uint8_t isa_irq);
+
+int xc_domain_unbind_pt_irq(xc_interface *xch,
+ uint32_t domid,
+ uint8_t machine_irq,
+ uint8_t irq_type,
+ uint8_t bus,
+ uint8_t device,
+ uint8_t intx,
+ uint8_t isa_irq);
+
+int xc_domain_bind_pt_pci_irq(xc_interface *xch,
+ uint32_t domid,
+ uint8_t machine_irq,
+ uint8_t bus,
+ uint8_t device,
+ uint8_t intx);
+
+int xc_domain_bind_pt_isa_irq(xc_interface *xch,
+ uint32_t domid,
+ uint8_t machine_irq);
+
+int xc_domain_bind_pt_spi_irq(xc_interface *xch,
+ uint32_t domid,
+ uint16_t vspi,
+ uint16_t spi);
+
+int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
+ uint32_t domid,
+ uint16_t vspi,
+ uint16_t spi);
+
+/* Set the target domain */
+int xc_domain_set_target(xc_interface *xch,
+ uint32_t domid,
+ uint32_t target);
+
+/* Control the domain for debug */
+int xc_domain_debug_control(xc_interface *xch,
+ uint32_t domid,
+ uint32_t sop,
+ uint32_t vcpu);
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * CPUID policy data, expressed in the legacy XEND format.
+ *
+ * Policy is an array of strings, 32 chars long:
+ * policy[0] = eax
+ * policy[1] = ebx
+ * policy[2] = ecx
+ * policy[3] = edx
+ *
+ * The format of the string is the following:
+ * '1' -> force to 1
+ * '0' -> force to 0
+ * 'x' -> we don't care (use default)
+ * 'k' -> pass through host value
+ * 's' -> legacy alias for 'k'
+ */
+struct xc_xend_cpuid {
+ union {
+ struct {
+ uint32_t leaf, subleaf;
+ };
+ uint32_t input[2];
+ };
+ char *policy[4];
+};
+
+/*
+ * Make adjustments to the CPUID settings for a domain.
+ *
+ * This path is used in two cases. First, for fresh boots of the domain, and
+ * secondly for migrate-in/restore of pre-4.14 guests (where CPUID data was
+ * missing from the stream). The @restore parameter distinguishes these
+ * cases, and the generated policy must be compatible with a 4.13.
+ *
+ * Either pass a full new @featureset (and @nr_features), or adjust individual
+ * features (@pae, @itsc, @nested_virt).
+ *
+ * Then (optionally) apply legacy XEND overrides (@xend) to the result.
+ */
+int xc_cpuid_apply_policy(xc_interface *xch,
+ uint32_t domid, bool restore,
+ const uint32_t *featureset,
+ unsigned int nr_features, bool pae, bool itsc,
+ bool nested_virt, const struct xc_xend_cpuid *xend);
+int xc_mca_op(xc_interface *xch, struct xen_mc *mc);
+int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
+ xc_cpumap_t cpumap, unsigned int nr_cpus);
+#endif
+
+struct xc_px_val {
+ uint64_t freq; /* Px core frequency */
+ uint64_t residency; /* Px residency time */
+ uint64_t count; /* Px transition count */
+};
+
+struct xc_px_stat {
+ uint8_t total; /* total Px states */
+ uint8_t usable; /* usable Px states */
+ uint8_t last; /* last Px state */
+ uint8_t cur; /* current Px state */
+ uint64_t *trans_pt; /* Px transition table */
+ struct xc_px_val *pt;
+};
+
+int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px);
+int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt);
+int xc_pm_reset_pxstat(xc_interface *xch, int cpuid);
+
+struct xc_cx_stat {
+ uint32_t nr; /* entry nr in triggers[]/residencies[], incl C0 */
+ uint32_t last; /* last Cx state */
+ uint64_t idle_time; /* idle time from boot */
+ uint64_t *triggers; /* Cx trigger counts */
+ uint64_t *residencies; /* Cx residencies */
+ uint32_t nr_pc; /* entry nr in pc[] */
+ uint32_t nr_cc; /* entry nr in cc[] */
+ uint64_t *pc; /* 1-biased indexing (i.e. excl C0) */
+ uint64_t *cc; /* 1-biased indexing (i.e. excl C0) */
+};
+typedef struct xc_cx_stat xc_cx_stat_t;
+
+int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx);
+int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt);
+int xc_pm_reset_cxstat(xc_interface *xch, int cpuid);
+
+int xc_cpu_online(xc_interface *xch, int cpu);
+int xc_cpu_offline(xc_interface *xch, int cpu);
+int xc_smt_enable(xc_interface *xch);
+int xc_smt_disable(xc_interface *xch);
+
+/*
+ * cpufreq para name of this structure named
+ * same as sysfs file name of native linux
+ */
+typedef struct xen_userspace xc_userspace_t;
+typedef struct xen_ondemand xc_ondemand_t;
+
+struct xc_get_cpufreq_para {
+ /* IN/OUT variable */
+ uint32_t cpu_num;
+ uint32_t freq_num;
+ uint32_t gov_num;
+
+ /* for all governors */
+ /* OUT variable */
+ uint32_t *affected_cpus;
+ uint32_t *scaling_available_frequencies;
+ char *scaling_available_governors;
+ char scaling_driver[CPUFREQ_NAME_LEN];
+
+ uint32_t cpuinfo_cur_freq;
+ uint32_t cpuinfo_max_freq;
+ uint32_t cpuinfo_min_freq;
+ uint32_t scaling_cur_freq;
+
+ char scaling_governor[CPUFREQ_NAME_LEN];
+ uint32_t scaling_max_freq;
+ uint32_t scaling_min_freq;
+
+ /* for specific governor */
+ union {
+ xc_userspace_t userspace;
+ xc_ondemand_t ondemand;
+ } u;
+
+ int32_t turbo_enabled;
+};
+
+int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
+ struct xc_get_cpufreq_para *user_para);
+int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname);
+int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
+ int ctrl_type, int ctrl_value);
+int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq);
+
+int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value);
+
+int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value);
+int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value);
+
+int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value);
+int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value);
+
+int xc_enable_turbo(xc_interface *xch, int cpuid);
+int xc_disable_turbo(xc_interface *xch, int cpuid);
+
+/**
+ * altp2m operations
+ */
+
+int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state);
+int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state);
+int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
+ uint32_t vcpuid, xen_pfn_t gfn);
+int xc_altp2m_set_vcpu_disable_notify(xc_interface *handle, uint32_t domid,
+ uint32_t vcpuid);
+int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
+ xenmem_access_t default_access, uint16_t *view_id);
+int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
+ uint16_t view_id);
+/* Switch all vCPUs of the domain to the specified altp2m view */
+int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
+ uint16_t view_id);
+int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t gfn, bool sve);
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t first_gfn,
+ xen_pfn_t last_gfn, bool sve,
+ xen_pfn_t *error_gfn, int32_t *error_code);
+int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t gfn, bool *sve);
+int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t gfn,
+ xenmem_access_t access);
+int xc_altp2m_set_mem_access_multi(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, uint8_t *access,
+ uint64_t *gfns, uint32_t nr);
+int xc_altp2m_get_mem_access(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t gfn,
+ xenmem_access_t *access);
+int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t old_gfn,
+ xen_pfn_t new_gfn);
+int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
+ uint32_t vcpuid, uint16_t *p2midx);
+/*
+ * Set view visibility for xc_altp2m_switch_to_view and vmfunc.
+ * Note: If altp2m mode is set to mixed the guest is able to change the view
+ * visibility and then call vmfunc.
+ */
+int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, bool visible);
+
+/**
+ * Mem paging operations.
+ * Paging is supported only on the x86 architecture in 64 bit mode, with
+ * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
+ * support is considered experimental.
+ */
+int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
+int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
+int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
+ uint64_t gfn);
+int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
+int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
+int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
+ uint64_t gfn, void *buffer);
+
+/**
+ * Access tracking operations.
+ * Supported only on Intel EPT 64 bit processors.
+ */
+
+/*
+ * Set a range of memory to a specific access.
+ * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
+ * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
+ */
+int xc_set_mem_access(xc_interface *xch, uint32_t domain_id,
+ xenmem_access_t access, uint64_t first_pfn,
+ uint32_t nr);
+
+/*
+ * Set an array of pages to their respective access in the access array.
+ * The nr parameter specifies the size of the pages and access arrays.
+ * The same allowed access types as for xc_set_mem_access() apply.
+ */
+int xc_set_mem_access_multi(xc_interface *xch, uint32_t domain_id,
+ uint8_t *access, uint64_t *pages,
+ uint32_t nr);
+
+/*
+ * Gets the mem access for the given page (returned in access on success)
+ */
+int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
+ uint64_t pfn, xenmem_access_t *access);
+
+/*
+ * Returns the VM_EVENT_INTERFACE version.
+ */
+int xc_vm_event_get_version(xc_interface *xch);
+
+/***
+ * Monitor control operations.
+ *
+ * Enables the VM event monitor ring and returns the mapped ring page.
+ * This ring is used to deliver mem_access events, as well a set of additional
+ * events that can be enabled with the xc_monitor_* functions.
+ *
+ * Will return NULL on error.
+ * Caller has to unmap this page when done.
+ */
+void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
+int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
+/*
+ * Get a bitmap of supported monitor events in the form
+ * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
+ */
+int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
+ uint32_t *capabilities);
+int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
+ uint16_t index, bool enable, bool sync,
+ uint64_t bitmask, bool onchangeonly);
+/*
+ * A list of MSR indices can usually be found in /usr/include/asm/msr-index.h.
+ * Please consult the Intel/AMD manuals for more information on
+ * non-architectural indices.
+ */
+int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
+ bool enable, bool onchangeonly);
+int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id, bool enable);
+int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
+ bool enable);
+int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
+ bool enable);
+int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id,
+ bool enable, bool sync, bool allow_userspace);
+/*
+ * Disables page-walk mem_access events by emulating. If the
+ * emulation can not be performed then a VM_EVENT_REASON_EMUL_UNIMPLEMENTED
+ * event will be issued.
+ */
+int xc_monitor_inguest_pagefault(xc_interface *xch, uint32_t domain_id,
+ bool disable);
+int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
+ bool enable, bool sync);
+int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable);
+int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
+ bool enable);
+int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
+ bool enable);
+/**
+ * This function enables / disables emulation for each REP for a
+ * REP-compatible instruction.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domain_id the domain id of the domain to operate on.
+ * @parm enable if 0 optimize when possible, else emulate each REP.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
+ bool enable);
+
+/***
+ * Memory sharing operations.
+ *
+ * Unless otherwise noted, these calls return 0 on success, -1 and errno on
+ * failure.
+ *
+ * Sharing is supported only on the x86 architecture in 64 bit mode, with
+ * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
+ * support is considered experimental.
+ *
+ * Calls below return ENOSYS if not in the x86_64 architecture.
+ * Calls below return ENODEV if the domain does not support HAP.
+ * Calls below return ESRCH if the specified domain does not exist.
+ * Calls below return EPERM if the caller is unprivileged for this domain.
+ */
+
+/* Turn on/off sharing for the domid, depending on the enable flag.
+ *
+ * Returns EXDEV if trying to enable and the domain has had a PCI device
+ * assigned for passthrough (these two features are mutually exclusive).
+ *
+ * When sharing for a domain is turned off, the domain may still reference
+ * shared pages. Unsharing happens lazily. */
+int xc_memshr_control(xc_interface *xch,
+ uint32_t domid,
+ int enable);
+
+/* Create a communication ring in which the hypervisor will place ENOMEM
+ * notifications.
+ *
+ * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
+ * allocated, and thus the out-of-memory error occurs.
+ *
+ * For complete examples on how to plumb a notification ring, look into
+ * xenpaging or xen-access.
+ *
+ * On receipt of a notification, the helper should ensure there is memory
+ * available to the domain before retrying.
+ *
+ * If a domain encounters an ENOMEM condition when sharing and this ring
+ * has not been set up, the hypervisor will crash the domain.
+ *
+ * Fails with:
+ * EINVAL if port is NULL
+ * EINVAL if the sharing ring has already been enabled
+ * ENOSYS if no guest gfn has been specified to host the ring via an hvm param
+ * EINVAL if the gfn for the ring has not been populated
+ * ENOENT if the gfn for the ring is paged out, or cannot be unshared
+ * EINVAL if the gfn for the ring cannot be written to
+ * EINVAL if the domain is dying
+ * ENOSPC if an event channel cannot be allocated for the ring
+ * ENOMEM if memory cannot be allocated for internal data structures
+ * EINVAL or EACCES if the request is denied by the security policy
+ */
+
+int xc_memshr_ring_enable(xc_interface *xch,
+ uint32_t domid,
+ uint32_t *port);
+/* Disable the ring for ENOMEM communication.
+ * May fail with EINVAL if the ring was not enabled in the first place.
+ */
+int xc_memshr_ring_disable(xc_interface *xch,
+ uint32_t domid);
+
+/*
+ * Calls below return EINVAL if sharing has not been enabled for the domain
+ * Calls below return EINVAL if the domain is dying
+ */
+/* Once a response to an ENOMEM notification is prepared, the tool can
+ * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
+ * event channel kick and/or this call. */
+int xc_memshr_domain_resume(xc_interface *xch,
+ uint32_t domid);
+
+/* Select a page for sharing.
+ *
+ * A 64 bit opaque handle will be stored in handle. The hypervisor ensures
+ * that if the page is modified, the handle will be invalidated, and future
+ * users of it will fail. If the page has already been selected and is still
+ * associated to a valid handle, the existing handle will be returned.
+ *
+ * May fail with:
+ * EINVAL if the gfn is not populated or not sharable (mmio, etc)
+ * ENOMEM if internal data structures cannot be allocated
+ * E2BIG if the page is being referenced by other subsystems (e.g. qemu)
+ * ENOENT or EEXIST if there are internal hypervisor errors.
+ */
+int xc_memshr_nominate_gfn(xc_interface *xch,
+ uint32_t domid,
+ unsigned long gfn,
+ uint64_t *handle);
+/* Same as above, but instead of a guest frame number, the input is a grant
+ * reference provided by the guest.
+ *
+ * May fail with EINVAL if the grant reference is invalid.
+ */
+int xc_memshr_nominate_gref(xc_interface *xch,
+ uint32_t domid,
+ grant_ref_t gref,
+ uint64_t *handle);
+
+/* The three calls below may fail with
+ * 10 (or -XENMEM_SHARING_OP_S_HANDLE_INVALID) if the handle passed as source
+ * is invalid.
+ * 9 (or -XENMEM_SHARING_OP_C_HANDLE_INVALID) if the handle passed as client is
+ * invalid.
+ */
+/* Share two nominated guest pages.
+ *
+ * If the call succeeds, both pages will point to the same backing frame (or
+ * mfn). The hypervisor will verify the handles are still valid, but it will
+ * not perform any sanity checking on the contents of the pages (the selection
+ * mechanism for sharing candidates is entirely up to the user-space tool).
+ *
+ * After successful sharing, the client handle becomes invalid. Both <domain,
+ * gfn> tuples point to the same mfn with the same handle, the one specified as
+ * source. Either 3-tuple can be specified later for further re-sharing.
+ */
+int xc_memshr_share_gfns(xc_interface *xch,
+ uint32_t source_domain,
+ unsigned long source_gfn,
+ uint64_t source_handle,
+ uint32_t client_domain,
+ unsigned long client_gfn,
+ uint64_t client_handle);
+
+/* Same as above, but share two grant references instead.
+ *
+ * May fail with EINVAL if either grant reference is invalid.
+ */
+int xc_memshr_share_grefs(xc_interface *xch,
+ uint32_t source_domain,
+ grant_ref_t source_gref,
+ uint64_t source_handle,
+ uint32_t client_domain,
+ grant_ref_t client_gref,
+ uint64_t client_handle);
+
+/* Allows a shared frame to be added directly to the guest physmap of the
+ * client domain.
+ *
+ * May additionally fail with
+ * 9 (-XENMEM_SHARING_OP_C_HANDLE_INVALID) if the physmap entry for the gfn is
+ * not suitable.
+ * ENOMEM if internal data structures cannot be allocated.
+ * ENOENT if there is an internal hypervisor error.
+ */
+int xc_memshr_add_to_physmap(xc_interface *xch,
+ uint32_t source_domain,
+ unsigned long source_gfn,
+ uint64_t source_handle,
+ uint32_t client_domain,
+ unsigned long client_gfn);
+
+/* Allows deduplicating a range of memory of a client domain. Using
+ * this function is equivalent to calling xc_memshr_nominate_gfn for each gfn
+ * in the two domains followed by xc_memshr_share_gfns.
+ *
+ * May fail with -EINVAL if the source and client domain have different
+ * memory size or if memory sharing is not enabled on either of the domains.
+ * May also fail with -ENOMEM if there isn't enough memory available to store
+ * the sharing metadata before deduplication can happen.
+ */
+int xc_memshr_range_share(xc_interface *xch,
+ uint32_t source_domain,
+ uint32_t client_domain,
+ uint64_t first_gfn,
+ uint64_t last_gfn);
+
+int xc_memshr_fork(xc_interface *xch,
+ uint32_t source_domain,
+ uint32_t client_domain,
+ bool allow_with_iommu,
+ bool block_interrupts);
+
+/*
+ * Note: this function is only intended to be used on short-lived forks that
+ * haven't yet acquired a lot of memory. In case the fork has a lot of memory
+ * it is likely more performant to create a new fork with xc_memshr_fork.
+ *
+ * With VMs that have a lot of memory this call may block for a long time.
+ */
+int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain);
+
+/* Debug calls: return the number of pages referencing the shared frame backing
+ * the input argument. Should be one or greater.
+ *
+ * May fail with EINVAL if there is no backing shared frame for the input
+ * argument.
+ */
+int xc_memshr_debug_gfn(xc_interface *xch,
+ uint32_t domid,
+ unsigned long gfn);
+/* May additionally fail with EINVAL if the grant reference is invalid. */
+int xc_memshr_debug_gref(xc_interface *xch,
+ uint32_t domid,
+ grant_ref_t gref);
+
+/* Audits the share subsystem.
+ *
+ * Returns ENOSYS if not supported (may not be compiled into the hypervisor).
+ *
+ * Returns the number of errors found during auditing otherwise. May be (should
+ * be!) zero.
+ *
+ * If debugtrace support has been compiled into the hypervisor and is enabled,
+ * verbose descriptions for the errors are available in the hypervisor console.
+ */
+int xc_memshr_audit(xc_interface *xch);
+
+/* Stats reporting.
+ *
+ * At any point in time, the following equality should hold for a host:
+ *
+ * Let dominfo(d) be the xc_dominfo_t struct filled by a call to
+ * xc_domain_getinfo(d)
+ *
+ * The summation of dominfo(d)->shr_pages for all domains in the system
+ * should be equal to
+ * xc_sharing_freed_pages + xc_sharing_used_frames
+ */
+/*
+ * This function returns the total number of pages freed by using sharing
+ * on the system. For example, if two domains contain a single entry in
+ * their p2m table that points to the same shared page (and no other pages
+ * in the system are shared), then this function should return 1.
+ */
+long xc_sharing_freed_pages(xc_interface *xch);
+
+/*
+ * This function returns the total number of frames occupied by shared
+ * pages on the system. This is independent of the number of domains
+ * pointing at these frames. For example, in the above scenario this
+ * should return 1. (And dominfo(d) for each of the two domains should return 1
+ * as well).
+ *
+ * Note that some of these sharing_used_frames may be referenced by
+ * a single domain page, and thus not realize any savings. The same
+ * applies to some of the pages counted in dominfo(d)->shr_pages.
+ */
+long xc_sharing_used_frames(xc_interface *xch);
+/*** End sharing interface ***/
+
+int xc_flask_load(xc_interface *xc_handle, char *buf, uint32_t size);
+int xc_flask_context_to_sid(xc_interface *xc_handle, char *buf, uint32_t size, uint32_t *sid);
+int xc_flask_sid_to_context(xc_interface *xc_handle, int sid, char *buf, uint32_t size);
+int xc_flask_getenforce(xc_interface *xc_handle);
+int xc_flask_setenforce(xc_interface *xc_handle, int mode);
+int xc_flask_getbool_byid(xc_interface *xc_handle, int id, char *name, uint32_t size, int *curr, int *pend);
+int xc_flask_getbool_byname(xc_interface *xc_handle, char *name, int *curr, int *pend);
+int xc_flask_setbool(xc_interface *xc_handle, char *name, int value, int commit);
+int xc_flask_add_pirq(xc_interface *xc_handle, unsigned int pirq, char *scontext);
+int xc_flask_add_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high,
+ char *scontext);
+int xc_flask_add_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high,
+ char *scontext);
+int xc_flask_add_device(xc_interface *xc_handle, unsigned long device, char *scontext);
+int xc_flask_del_pirq(xc_interface *xc_handle, unsigned int pirq);
+int xc_flask_del_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high);
+int xc_flask_del_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high);
+int xc_flask_del_device(xc_interface *xc_handle, unsigned long device);
+int xc_flask_access(xc_interface *xc_handle, const char *scon, const char *tcon,
+ uint16_t tclass, uint32_t req,
+ uint32_t *allowed, uint32_t *decided,
+ uint32_t *auditallow, uint32_t *auditdeny,
+ uint32_t *seqno);
+int xc_flask_avc_cachestats(xc_interface *xc_handle, char *buf, int size);
+int xc_flask_policyvers(xc_interface *xc_handle);
+int xc_flask_avc_hashstats(xc_interface *xc_handle, char *buf, int size);
+int xc_flask_getavc_threshold(xc_interface *xc_handle);
+int xc_flask_setavc_threshold(xc_interface *xc_handle, int threshold);
+int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid);
+
+struct elf_binary;
+void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
+ int verbose);
+/* Useful for callers who also use libelf. */
+
+/*
+ * Execute an image previously loaded with xc_kexec_load().
+ *
+ * Does not return on success.
+ *
+ * Fails with:
+ * ENOENT if the specified image has not been loaded.
+ */
+int xc_kexec_exec(xc_interface *xch, int type);
+
+/*
+ * Find the machine address and size of certain memory areas.
+ *
+ * KEXEC_RANGE_MA_CRASH crash area
+ * KEXEC_RANGE_MA_XEN Xen itself
+ * KEXEC_RANGE_MA_CPU CPU note for CPU number 'nr'
+ * KEXEC_RANGE_MA_XENHEAP xenheap
+ * KEXEC_RANGE_MA_EFI_MEMMAP EFI Memory Map
+ * KEXEC_RANGE_MA_VMCOREINFO vmcoreinfo
+ *
+ * Fails with:
+ * EINVAL if the range or CPU number isn't valid.
+ */
+int xc_kexec_get_range(xc_interface *xch, int range, int nr,
+ uint64_t *size, uint64_t *start);
+
+/*
+ * Load a kexec image into memory.
+ *
+ * The image may be of type KEXEC_TYPE_DEFAULT (executed on request)
+ * or KEXEC_TYPE_CRASH (executed on a crash).
+ *
+ * The image architecture may be a 32-bit variant of the hypervisor
+ * architecture (e.g, EM_386 on a x86-64 hypervisor).
+ *
+ * Fails with:
+ * ENOMEM if there is insufficient memory for the new image.
+ * EINVAL if the image does not fit into the crash area or the entry
+ * point isn't within one of segments.
+ * EBUSY if another image is being executed.
+ */
+int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
+ uint64_t entry_maddr,
+ uint32_t nr_segments, xen_kexec_segment_t *segments);
+
+/*
+ * Unload a kexec image.
+ *
+ * This prevents a KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH image from
+ * being executed. The crash images are not cleared from the crash
+ * region.
+ */
+int xc_kexec_unload(xc_interface *xch, int type);
+
+/*
+ * Find out whether the image has been successfully loaded.
+ *
+ * The type can be either KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
+ * If zero is returned, that means no image is loaded for the type.
+ * If one is returned, that means an image is loaded for the type.
+ * Otherwise, negative return value indicates error.
+ */
+int xc_kexec_status(xc_interface *xch, int type);
+
+typedef xenpf_resource_entry_t xc_resource_entry_t;
+
+/*
+ * Generic resource operation which contains multiple non-preemptible
+ * resource access entries that passed to xc_resource_op().
+ */
+struct xc_resource_op {
+ uint64_t result; /* on return, check this field first */
+ uint32_t cpu; /* which cpu to run */
+ uint32_t nr_entries; /* number of resource entries */
+ xc_resource_entry_t *entries;
+};
+
+typedef struct xc_resource_op xc_resource_op_t;
+int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops);
+
+#if defined(__i386__) || defined(__x86_64__)
+enum xc_psr_cmt_type {
+ XC_PSR_CMT_L3_OCCUPANCY,
+ XC_PSR_CMT_TOTAL_MEM_COUNT,
+ XC_PSR_CMT_LOCAL_MEM_COUNT,
+};
+typedef enum xc_psr_cmt_type xc_psr_cmt_type;
+
+enum xc_psr_type {
+ XC_PSR_CAT_L3_CBM = 1,
+ XC_PSR_CAT_L3_CBM_CODE = 2,
+ XC_PSR_CAT_L3_CBM_DATA = 3,
+ XC_PSR_CAT_L2_CBM = 4,
+ XC_PSR_MBA_THRTL = 5,
+};
+typedef enum xc_psr_type xc_psr_type;
+
+enum xc_psr_feat_type {
+ XC_PSR_CAT_L3,
+ XC_PSR_CAT_L2,
+ XC_PSR_MBA,
+};
+typedef enum xc_psr_feat_type xc_psr_feat_type;
+
+union xc_psr_hw_info {
+ struct {
+ uint32_t cos_max;
+ uint32_t cbm_len;
+ bool cdp_enabled;
+ } cat;
+
+ struct {
+ uint32_t cos_max;
+ uint32_t thrtl_max;
+ bool linear;
+ } mba;
+};
+typedef union xc_psr_hw_info xc_psr_hw_info;
+
+int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid);
+int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid);
+int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
+ uint32_t *rmid);
+int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid);
+int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
+ uint32_t *upscaling_factor);
+int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask);
+int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
+ uint32_t *l3_cache_size);
+int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
+ uint32_t psr_cmt_type, uint64_t *monitor_data,
+ uint64_t *tsc);
+int xc_psr_cmt_enabled(xc_interface *xch);
+
+int xc_psr_set_domain_data(xc_interface *xch, uint32_t domid,
+ xc_psr_type type, uint32_t target,
+ uint64_t data);
+int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
+ xc_psr_type type, uint32_t target,
+ uint64_t *data);
+int xc_psr_get_hw_info(xc_interface *xch, uint32_t socket,
+ xc_psr_feat_type type, xc_psr_hw_info *hw_info);
+
+int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps);
+int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
+ uint32_t *nr_features, uint32_t *featureset);
+
+int xc_get_cpu_policy_size(xc_interface *xch, uint32_t *nr_leaves,
+ uint32_t *nr_msrs);
+int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
+ uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
+ uint32_t *nr_msrs, xen_msr_entry_t *msrs);
+int xc_get_domain_cpu_policy(xc_interface *xch, uint32_t domid,
+ uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
+ uint32_t *nr_msrs, xen_msr_entry_t *msrs);
+int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
+ uint32_t nr_leaves, xen_cpuid_leaf_t *leaves,
+ uint32_t nr_msrs, xen_msr_entry_t *msrs,
+ uint32_t *err_leaf_p, uint32_t *err_subleaf_p,
+ uint32_t *err_msr_p);
+
+uint32_t xc_get_cpu_featureset_size(void);
+
+enum xc_static_cpu_featuremask {
+ XC_FEATUREMASK_KNOWN,
+ XC_FEATUREMASK_SPECIAL,
+ XC_FEATUREMASK_PV_MAX,
+ XC_FEATUREMASK_PV_DEF,
+ XC_FEATUREMASK_HVM_SHADOW_MAX,
+ XC_FEATUREMASK_HVM_SHADOW_DEF,
+ XC_FEATUREMASK_HVM_HAP_MAX,
+ XC_FEATUREMASK_HVM_HAP_DEF,
+};
+const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
+
+#endif
+
+int xc_livepatch_upload(xc_interface *xch,
+ char *name, unsigned char *payload, uint32_t size);
+
+int xc_livepatch_get(xc_interface *xch,
+ char *name,
+ xen_livepatch_status_t *status);
+
+/*
+ * Get a number of available payloads and get actual total size of
+ * the payloads' name and metadata arrays.
+ *
+ * This function is typically executed first, before the xc_livepatch_list(),
+ * to obtain the sizes and correctly allocate all necessary data resources.
+ *
+ * The return value is zero if the hypercall completed successfully.
+ *
+ * If there was an error performing the sysctl operation, the return value
+ * will contain the hypercall error code value.
+ */
+int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
+ uint32_t *name_total_size,
+ uint32_t *metadata_total_size);
+
+/*
+ * The heart of this function is to get an array of the following objects:
+ * - xen_livepatch_status_t: states and return codes of payloads
+ * - name: names of payloads
+ * - len: lengths of corresponding payloads' names
+ * - metadata: payloads' metadata
+ * - metadata_len: lengths of corresponding payloads' metadata
+ *
+ * However it is complex because it has to deal with the hypervisor
+ * returning some of the requested data or data being stale
+ * (another hypercall might alter the list).
+ *
+ * The parameters that the function expects to contain data from
+ * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
+ * 'left' are also updated with the number of entries filled out
+ * and respectively the number of entries left to get from hypervisor.
+ *
+ * It is expected that the caller of this function will first issue the
+ * xc_livepatch_list_get_sizes() in order to obtain total sizes of names
+ * and all metadata as well as the current number of payload entries.
+ * The total sizes are required and supplied via the 'name_total_size' and
+ * 'metadata_total_size' parameters.
+ *
+ * The 'max' is to be provided by the caller with the maximum number of
+ * entries that 'info', 'name', 'len', 'metadata' and 'metadata_len' arrays
+ * can be filled up with.
+ *
+ * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
+ * structure size.
+ *
+ * Each entry in the 'name' array may have an arbitrary size.
+ *
+ * Each entry in the 'len' array is expected to be of uint32_t size.
+ *
+ * Each entry in the 'metadata' array may have an arbitrary size.
+ *
+ * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
+ *
+ * The return value is zero if the hypercall completed successfully.
+ * Note that the return value is _not_ the amount of entries filled
+ * out - that is saved in 'done'.
+ *
+ * If there was an error performing the operation, the return value
+ * will contain a negative -EXX type value. The 'done' and 'left'
+ * will contain the number of entries that had been successfully
+ * retrieved (if any).
+ */
+int xc_livepatch_list(xc_interface *xch, const unsigned int max,
+ const unsigned int start,
+ struct xen_livepatch_status *info,
+ char *name, uint32_t *len,
+ const uint32_t name_total_size,
+ char *metadata, uint32_t *metadata_len,
+ const uint32_t metadata_total_size,
+ unsigned int *done, unsigned int *left);
+
+/*
+ * The operations are asynchronous and the hypervisor may take a while
+ * to complete them. The `timeout` offers an option to expire the
+ * operation if it could not be completed within the specified time
+ * (in ns). Value of 0 means let hypervisor decide the best timeout.
+ * The `flags` allows to pass extra parameters to the actions.
+ */
+int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
+int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
+int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
+int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
+
+/*
+ * Ensure cache coherency after memory modifications. A call to this function
+ * is only required on ARM as the x86 architecture provides cache coherency
+ * guarantees. Calling this function on x86 is allowed but has no effect.
+ */
+int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
+ xen_pfn_t start_pfn, xen_pfn_t nr_pfns);
+
+/* Compat shims */
+#include "xenctrl_compat.h"
+
+#endif /* XENCTRL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * Compat shims for use of 3rd party consumers of libxenctrl
+ * functionality which has been split into separate libraries.
+ *
+ * New code should use the separate libraries.
+ *
+ * Each interface must be opted-into separately by defining:
+ *
+ * XC_WANT_COMPAT_EVTCHN_API
+ * - Functions relating to /dev/xen/evtchn
+ */
+#ifndef XENCTRL_COMPAT_H
+#define XENCTRL_COMPAT_H
+
+#ifdef XC_WANT_COMPAT_MAP_FOREIGN_API
+/**
+ * Memory maps a range within one domain to a local address range. Mappings
+ * should be unmapped with munmap and should follow the same rules as mmap
+ * regarding page alignment. Returns NULL on failure.
+ *
+ * @parm xch a handle on an open hypervisor interface
+ * @parm dom the domain to map memory from
+ * @parm size the amount of memory to map (in multiples of page size)
+ * @parm prot same flag as in mmap().
+ * @parm mfn the frame address to map.
+ */
+void *xc_map_foreign_range(xc_interface *xch, uint32_t dom,
+ int size, int prot,
+ unsigned long mfn );
+
+void *xc_map_foreign_pages(xc_interface *xch, uint32_t dom, int prot,
+ const xen_pfn_t *arr, int num );
+
+/* Nothing within the library itself other than the compat wrapper
+ * itself should be using this, everything inside has access to
+ * xenforeignmemory_map().
+ */
+#if !defined(XC_INTERNAL_COMPAT_MAP_FOREIGN_API) || \
+ defined(XC_BUILDING_COMPAT_MAP_FOREIGN_API)
+/**
+ * Like xc_map_foreign_pages(), except it can succeed partially.
+ * When a page cannot be mapped, its respective field in @err is
+ * set to the corresponding errno value.
+ */
+void *xc_map_foreign_bulk(xc_interface *xch, uint32_t dom, int prot,
+ const xen_pfn_t *arr, int *err, unsigned int num);
+#endif
+
+#endif
+
+#ifdef XC_WANT_COMPAT_EVTCHN_API
+
+typedef struct xenevtchn_handle xc_evtchn;
+typedef xc_evtchn_port_or_error_t evtchn_port_or_error_t;
+
+xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
+ unsigned open_flags);
+int xc_evtchn_close(xc_evtchn *xce);
+int xc_evtchn_fd(xc_evtchn *xce);
+int xc_evtchn_notify(xc_evtchn *xce, evtchn_port_t port);
+xc_evtchn_port_or_error_t
+xc_evtchn_bind_unbound_port(xc_evtchn *xce, uint32_t domid);
+xc_evtchn_port_or_error_t
+xc_evtchn_bind_interdomain(xc_evtchn *xce, uint32_t domid,
+ evtchn_port_t remote_port);
+xc_evtchn_port_or_error_t
+xc_evtchn_bind_virq(xc_evtchn *xce, unsigned int virq);
+int xc_evtchn_unbind(xc_evtchn *xce, evtchn_port_t port);
+xc_evtchn_port_or_error_t
+xc_evtchn_pending(xc_evtchn *xce);
+int xc_evtchn_unmask(xc_evtchn *xce, evtchn_port_t port);
+
+#endif /* XC_WANT_COMPAT_EVTCHN_API */
+
+#ifdef XC_WANT_COMPAT_GNTTAB_API
+
+typedef struct xengntdev_handle xc_gnttab;
+
+xc_gnttab *xc_gnttab_open(xentoollog_logger *logger,
+ unsigned open_flags);
+int xc_gnttab_close(xc_gnttab *xcg);
+void *xc_gnttab_map_grant_ref(xc_gnttab *xcg,
+ uint32_t domid,
+ uint32_t ref,
+ int prot);
+void *xc_gnttab_map_grant_refs(xc_gnttab *xcg,
+ uint32_t count,
+ uint32_t *domids,
+ uint32_t *refs,
+ int prot);
+void *xc_gnttab_map_domain_grant_refs(xc_gnttab *xcg,
+ uint32_t count,
+ uint32_t domid,
+ uint32_t *refs,
+ int prot);
+void *xc_gnttab_map_grant_ref_notify(xc_gnttab *xcg,
+ uint32_t domid,
+ uint32_t ref,
+ int prot,
+ uint32_t notify_offset,
+ evtchn_port_t notify_port);
+int xc_gnttab_munmap(xc_gnttab *xcg,
+ void *start_address,
+ uint32_t count);
+int xc_gnttab_set_max_grants(xc_gnttab *xcg,
+ uint32_t count);
+
+typedef struct xengntdev_handle xc_gntshr;
+
+xc_gntshr *xc_gntshr_open(xentoollog_logger *logger,
+ unsigned open_flags);
+int xc_gntshr_close(xc_gntshr *xcg);
+void *xc_gntshr_share_pages(xc_gntshr *xcg, uint32_t domid,
+ int count, uint32_t *refs, int writable);
+void *xc_gntshr_share_page_notify(xc_gntshr *xcg, uint32_t domid,
+ uint32_t *ref, int writable,
+ uint32_t notify_offset,
+ evtchn_port_t notify_port);
+int xc_gntshr_munmap(xc_gntshr *xcg, void *start_address, uint32_t count);
+
+#endif /* XC_WANT_COMPAT_GNTTAB_API */
+
+#ifdef XC_WANT_COMPAT_DEVICEMODEL_API
+
+int xc_hvm_create_ioreq_server(
+ xc_interface *xch, uint32_t domid, int handle_bufioreq,
+ ioservid_t *id);
+int xc_hvm_get_ioreq_server_info(
+ xc_interface *xch, uint32_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
+ xen_pfn_t *bufioreq_pfn, evtchn_port_t *bufioreq_port);
+int xc_hvm_map_io_range_to_ioreq_server(
+ xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
+ uint64_t start, uint64_t end);
+int xc_hvm_unmap_io_range_from_ioreq_server(
+ xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
+ uint64_t start, uint64_t end);
+int xc_hvm_map_pcidev_to_ioreq_server(
+ xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
+ uint8_t bus, uint8_t device, uint8_t function);
+int xc_hvm_unmap_pcidev_from_ioreq_server(
+ xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
+ uint8_t bus, uint8_t device, uint8_t function);
+int xc_hvm_destroy_ioreq_server(
+ xc_interface *xch, uint32_t domid, ioservid_t id);
+int xc_hvm_set_ioreq_server_state(
+ xc_interface *xch, uint32_t domid, ioservid_t id, int enabled);
+int xc_hvm_set_pci_intx_level(
+ xc_interface *xch, uint32_t domid, uint16_t segment, uint8_t bus,
+ uint8_t device, uint8_t intx, unsigned int level);
+int xc_hvm_set_isa_irq_level(
+ xc_interface *xch, uint32_t domid, uint8_t irq, unsigned int level);
+int xc_hvm_set_pci_link_route(
+ xc_interface *xch, uint32_t domid, uint8_t link, uint8_t irq);
+int xc_hvm_inject_msi(
+ xc_interface *xch, uint32_t domid, uint64_t msi_addr, uint32_t msi_data);
+int xc_hvm_track_dirty_vram(
+ xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr,
+ unsigned long *dirty_bitmap);
+int xc_hvm_modified_memory(
+ xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr);
+int xc_hvm_set_mem_type(
+ xc_interface *xch, uint32_t domid, hvmmem_type_t type,
+ uint64_t first_pfn, uint32_t nr);
+int xc_hvm_inject_trap(
+ xc_interface *xch, uint32_t domid, int vcpu, uint8_t vector,
+ uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2);
+int xc_domain_pin_memory_cacheattr(
+ xc_interface *xch, uint32_t domid, uint64_t start, uint64_t end,
+ uint32_t type);
+
+#endif /* XC_WANT_COMPAT_DEVICEMODEL_API */
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * Copyright (c) 2017 Citrix Systems Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef XENDEVICEMODEL_H
+#define XENDEVICEMODEL_H
+
+#ifdef __XEN_TOOLS__
+
+#include <stdint.h>
+
+#include <xen/xen.h>
+#include <xen/hvm/dm_op.h>
+#include <xen/hvm/hvm_op.h>
+
+/* Callers who don't care don't need to #include <xentoollog.h> */
+struct xentoollog_logger;
+
+typedef struct xendevicemodel_handle xendevicemodel_handle;
+
+xendevicemodel_handle *xendevicemodel_open(struct xentoollog_logger *logger,
+ unsigned int open_flags);
+
+int xendevicemodel_close(xendevicemodel_handle *dmod);
+
+/*
+ * IOREQ Server API. (See section on IOREQ Servers in public/hvm_op.h).
+ */
+
+/**
+ * This function instantiates an IOREQ Server.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm handle_bufioreq how should the IOREQ Server handle buffered
+ * requests (HVM_IOREQSRV_BUFIOREQ_*)?
+ * @parm id pointer to an ioservid_t to receive the IOREQ Server id.
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_create_ioreq_server(
+ xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
+ ioservid_t *id);
+
+/**
+ * This function retrieves the necessary information to allow an
+ * emulator to use an IOREQ Server.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm ioreq_gfn pointer to a xen_pfn_t to receive the synchronous ioreq
+ * gfn. (May be NULL if not required)
+ * @parm bufioreq_gfn pointer to a xen_pfn_t to receive the buffered ioreq
+ * gfn. (May be NULL if not required)
+ * @parm bufioreq_port pointer to a evtchn_port_t to receive the buffered
+ * ioreq event channel. (May be NULL if not required)
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_get_ioreq_server_info(
+ xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
+ xen_pfn_t *ioreq_gfn, xen_pfn_t *bufioreq_gfn,
+ evtchn_port_t *bufioreq_port);
+
+/**
+ * This function registers a range of memory or I/O ports for emulation.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm is_mmio is this a range of ports or memory
+ * @parm start start of range
+ * @parm end end of range (inclusive).
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_map_io_range_to_ioreq_server(
+ xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
+ uint64_t start, uint64_t end);
+
+/**
+ * This function deregisters a range of memory or I/O ports for emulation.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm is_mmio is this a range of ports or memory
+ * @parm start start of range
+ * @parm end end of range (inclusive).
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_unmap_io_range_from_ioreq_server(
+ xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
+ uint64_t start, uint64_t end);
+
+/**
+ * This function registers/deregisters a memory type for emulation.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced.
+ * @parm id the IOREQ Server id.
+ * @parm type the memory type to be emulated. For now, only HVMMEM_ioreq_server
+ * is supported, and in the future new types can be introduced, e.g.
+ * HVMMEM_ioreq_serverX mapped to ioreq server X.
+ * @parm flags operations to be emulated; 0 for unmap. For now, only write
+ * operations will be emulated and can be extended to emulate
+ * read ones in the future.
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_map_mem_type_to_ioreq_server(
+ xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, uint16_t type,
+ uint32_t flags);
+
+/**
+ * This function registers a PCI device for config space emulation.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm segment the PCI segment of the device
+ * @parm bus the PCI bus of the device
+ * @parm device the 'slot' number of the device
+ * @parm function the function number of the device
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_map_pcidev_to_ioreq_server(
+ xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
+ uint16_t segment, uint8_t bus, uint8_t device, uint8_t function);
+
+/**
+ * This function deregisters a PCI device for config space emulation.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm segment the PCI segment of the device
+ * @parm bus the PCI bus of the device
+ * @parm device the 'slot' number of the device
+ * @parm function the function number of the device
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_unmap_pcidev_from_ioreq_server(
+ xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
+ uint16_t segment, uint8_t bus, uint8_t device, uint8_t function);
+
+/**
+ * This function destroys an IOREQ Server.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_destroy_ioreq_server(
+ xendevicemodel_handle *dmod, domid_t domid, ioservid_t id);
+
+/**
+ * This function sets IOREQ Server state. An IOREQ Server
+ * will not be passed emulation requests until it is in
+ * the enabled state.
+ * Note that the contents of the ioreq_gfn and bufioreq_gfn are
+ * not meaningful until the IOREQ Server is in the enabled state.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm id the IOREQ Server id.
+ * @parm enabled the state.
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_set_ioreq_server_state(
+ xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled);
+
+/**
+ * This function sets the level of INTx pin of an emulated PCI device.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm segment the PCI segment number of the emulated device
+ * @parm bus the PCI bus number of the emulated device
+ * @parm device the PCI device number of the emulated device
+ * @parm intx the INTx pin to modify (0 => A .. 3 => D)
+ * @parm level the level (1 for asserted, 0 for de-asserted)
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_set_pci_intx_level(
+ xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
+ uint8_t bus, uint8_t device, uint8_t intx, unsigned int level);
+
+/**
+ * This function sets the level of an ISA IRQ line.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm irq the IRQ number (0 - 15)
+ * @parm level the level (1 for asserted, 0 for de-asserted)
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_set_isa_irq_level(
+ xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
+ unsigned int level);
+
+/**
+ * This function maps a PCI INTx line to an IRQ line.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm link the INTx link (0 => A .. 3 => D)
+ * @parm irq the IRQ number (0 - 15)
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_set_pci_link_route(
+ xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq);
+
+/**
+ * This function injects an MSI into a guest.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm msi_addr the MSI address (0xfeexxxxx)
+ * @parm msi_data the MSI data
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_inject_msi(
+ xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
+ uint32_t msi_data);
+
+/**
+ * This function enables tracking of changes in the VRAM area.
+ *
+ * The following is done atomically:
+ * - get the dirty bitmap since the last call.
+ * - set up dirty tracking area for period up to the next call.
+ * - clear the dirty tracking area.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm first_pfn the start of the area to track
+ * @parm nr the number of pages to track
+ * @parm dirty_bitmap a pointer to the bitmap to be updated
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_track_dirty_vram(
+ xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
+ uint32_t nr, unsigned long *dirty_bitmap);
+
+/**
+ * This function notifies the hypervisor that a set of contiguous
+ * domain pages have been modified.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm first_pfn the start of the modified area
+ * @parm nr the number of pages modified
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_modified_memory(
+ xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
+ uint32_t nr);
+
+/**
+ * This function notifies the hypervisor that a set of discontiguous
+ * domain pages have been modified.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm extents an array of extent structs, which each hold
+ a start_pfn and nr (number of pfns).
+ * @parm nr the number of extents in the array
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_modified_memory_bulk(
+ xendevicemodel_handle *dmod, domid_t domid,
+ struct xen_dm_op_modified_memory_extent extents[], uint32_t nr);
+
+/**
+ * This function notifies the hypervisor that a set of domain pages
+ * are to be treated in a specific way. (See the definition of
+ * hvmmem_type_t).
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm mem_type determines how the set is to be treated
+ * @parm first_pfn the start of the set
+ * @parm nr the number of pages in the set
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_set_mem_type(
+ xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
+ uint64_t first_pfn, uint32_t nr);
+
+/**
+ * This function injects an event into a vCPU to take effect the next
+ * time it resumes.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm vcpu the vcpu id
+ * @parm vector the interrupt vector
+ * @parm type the event type (see the definition of enum x86_event_type)
+ * @parm error_code the error code or ~0 to skip
+ * @parm insn_len the instruction length
+ * @parm extra type-specific extra data (%cr2 for #PF, pending_dbg for #DB)
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_inject_event(
+ xendevicemodel_handle *dmod, domid_t domid, int vcpu, uint8_t vector,
+ uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t extra);
+
+/**
+ * Shuts the domain down.
+ *
+ * @parm reason usually enum sched_shutdown_reason, see xen/sched.h
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_shutdown(
+ xendevicemodel_handle *dmod, domid_t domid, unsigned int reason);
+
+/**
+ * Relocate GFNs for the specified domain.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm size Number of GFNs to process
+ * @parm src_gfn Starting GFN to relocate
+ * @parm dst_gfn Starting GFN where GFNs should be relocated
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_relocate_memory(
+ xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
+ uint64_t dst_gfn);
+
+/**
+ * Pins caching type of RAM space.
+ *
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced
+ * @parm start Start gfn
+ * @parm end End gfn
+ * @parm type XEN_DMOP_MEM_CACHEATTR_*
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_pin_memory_cacheattr(
+ xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
+ uint32_t type);
+
+/**
+ * This function restricts the use of this handle to the specified
+ * domain.
+ *
+ * @parm dmod handle to the open devicemodel interface
+ * @parm domid the domain id
+ * @return 0 on success, -1 on failure.
+ */
+int xendevicemodel_restrict(xendevicemodel_handle *dmod, domid_t domid);
+
+#endif /* __XEN_TOOLS__ */
+
+#endif /* XENDEVICEMODEL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Split off from:
+ * xenctrl.h
+ *
+ * A library for low-level access to the Xen control interfaces.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef XENEVTCHN_H
+#define XENEVTCHN_H
+
+#include <stdint.h>
+
+#include <xen/event_channel.h>
+
+/* A port identifier is guaranteed to fit in 31 bits. */
+typedef int xenevtchn_port_or_error_t;
+
+typedef struct xenevtchn_handle xenevtchn_handle;
+
+/* Callers who don't care don't need to #include <xentoollog.h> */
+struct xentoollog_logger;
+
+/*
+ * EVENT CHANNEL FUNCTIONS
+ *
+ * None of these do any logging.
+ */
+
+/*
+ * Return a handle to the event channel driver, or NULL on failure, in
+ * which case errno will be set appropriately.
+ *
+ * Note: After fork(2) a child process must not use any opened evtchn
+ * handle inherited from their parent, nor access any grant mapped
+ * areas associated with that handle.
+ *
+ * The child must open a new handle if they want to interact with
+ * evtchn.
+ *
+ * Calling exec(2) in a child will safely (and reliably) reclaim any
+ * allocated resources via a xenevtchn_handle in the parent.
+ *
+ * A child which does not call exec(2) may safely call
+ * xenevtchn_close() on a xenevtchn_handle inherited from their
+ * parent. This will attempt to reclaim any resources associated with
+ * that handle. Note that in some implementations this reclamation may
+ * not be completely effective, in this case any affected resources
+ * remain allocated.
+ *
+ * Calling xenevtchn_close() is the only safe operation on a
+ * xenevtchn_handle which has been inherited.
+ */
+/* Currently no flags are defined */
+xenevtchn_handle *xenevtchn_open(struct xentoollog_logger *logger,
+ unsigned open_flags);
+
+/*
+ * Close a handle previously allocated with xenevtchn_open().
+ */
+int xenevtchn_close(xenevtchn_handle *xce);
+
+/*
+ * Return an fd that can be select()ed on.
+ *
+ * Note that due to bugs, setting this fd to non blocking may not
+ * work: you would hope that it would result in xenevtchn_pending
+ * failing with EWOULDBLOCK if there are no events signaled, but in
+ * fact it may block. (Bug is present in at least Linux 3.12, and
+ * perhaps on other platforms or later version.)
+ *
+ * To be safe, you must use poll() or select() before each call to
+ * xenevtchn_pending. If you have multiple threads (or processes)
+ * sharing a single xce handle this will not work, and there is no
+ * straightforward workaround. Please design your program some other
+ * way.
+ */
+int xenevtchn_fd(xenevtchn_handle *xce);
+
+/*
+ * Notify the given event channel. Returns -1 on failure, in which case
+ * errno will be set appropriately.
+ */
+int xenevtchn_notify(xenevtchn_handle *xce, evtchn_port_t port);
+
+/*
+ * Returns a new event port awaiting interdomain connection from the given
+ * domain ID, or -1 on failure, in which case errno will be set appropriately.
+ */
+xenevtchn_port_or_error_t
+xenevtchn_bind_unbound_port(xenevtchn_handle *xce, uint32_t domid);
+
+/*
+ * Returns a new event port bound to the remote port for the given domain ID,
+ * or -1 on failure, in which case errno will be set appropriately.
+ */
+xenevtchn_port_or_error_t
+xenevtchn_bind_interdomain(xenevtchn_handle *xce, uint32_t domid,
+ evtchn_port_t remote_port);
+
+/*
+ * Bind an event channel to the given VIRQ. Returns the event channel bound to
+ * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
+ */
+xenevtchn_port_or_error_t
+xenevtchn_bind_virq(xenevtchn_handle *xce, unsigned int virq);
+
+/*
+ * Unbind the given event channel. Returns -1 on failure, in which case errno
+ * will be set appropriately.
+ */
+int xenevtchn_unbind(xenevtchn_handle *xce, evtchn_port_t port);
+
+/*
+ * Return the next event channel to become pending, or -1 on failure, in which
+ * case errno will be set appropriately.
+ *
+ * At the hypervisor level the event channel will have been masked,
+ * and then cleared, by the underlying machinery (evtchn kernel
+ * driver, or equivalent). So if the event channel is signaled again
+ * after it is returned here, it will be queued up, and delivered
+ * again after you unmask it. (See the documentation in the Xen
+ * public header event_channel.h.)
+ *
+ * On receiving the notification from xenevtchn_pending, you should
+ * normally: check (by other means) what work needs doing; do the
+ * necessary work (if any); unmask the event channel with
+ * xenevtchn_unmask (if you want to receive any further
+ * notifications).
+ */
+xenevtchn_port_or_error_t
+xenevtchn_pending(xenevtchn_handle *xce);
+
+/*
+ * Unmask the given event channel. Returns -1 on failure, in which case errno
+ * will be set appropriately.
+ */
+int xenevtchn_unmask(xenevtchn_handle *xce, evtchn_port_t port);
+
+/**
+ * This function restricts the use of this handle to the specified
+ * domain.
+ *
+ * @parm xce handle to the open evtchn interface
+ * @parm domid the domain id
+ * @return 0 on success, -1 on failure with errno set appropriately.
+ */
+int xenevtchn_restrict(xenevtchn_handle *xce, domid_t domid);
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef XENFOREIGNMEMORY_H
+#define XENFOREIGNMEMORY_H
+
+/*
+ * This library allows you to map foreign domain memory, subject to
+ * permissions for both the process and the domain in which the
+ * process runs.
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+
+#include <xen/xen.h>
+
+/* Callers who don't care don't need to #include <xentoollog.h> */
+struct xentoollog_logger;
+
+typedef struct xenforeignmemory_handle xenforeignmemory_handle;
+
+/*
+ * Return a handle onto the foreign memory mapping driver. Logs errors.
+ *
+ * Note: After fork(2) a child process must not use any opened
+ * foreignmemory handle inherited from their parent, nor access any
+ * grant mapped areas associated with that handle.
+ *
+ * The child must open a new handle if they want to interact with
+ * foreignmemory.
+ *
+ * Calling exec(2) in a child will safely (and reliably) reclaim any
+ * resources which were allocated via a xenforeignmemory_handle in the
+ * parent.
+ *
+ * A child which does not call exec(2) may safely call
+ * xenforeignmemory_close() on a xenforeignmemory_handle inherited
+ * from their parent. This will attempt to reclaim any resources
+ * associated with that handle. Note that in some implementations this
+ * reclamation may not be completely effective, in this case any
+ * affected resources remain allocated.
+ *
+ * Calling xenforeignmemory_close() is the only safe operation on a
+ * xenforeignmemory_handle which has been inherited.
+ */
+xenforeignmemory_handle *xenforeignmemory_open(struct xentoollog_logger *logger,
+ unsigned open_flags);
+
+/*
+ * Close a handle previously allocated with xenforeignmemory_open().
+ *
+ * Under normal circumstances (i.e. not in the child after a fork)
+ * xenforeignmemory_unmap() should be used on all mappings allocated
+ * by xenforeignmemory_map() prior to closing the handle in order to
+ * free up resources associated with those mappings.
+ *
+ * This is the only function which may be safely called on a
+ * xenforeignmemory_handle in a child after a
+ * fork. xenforeignmemory_unmap() must not be called under such
+ * circumstances.
+ */
+int xenforeignmemory_close(xenforeignmemory_handle *fmem);
+
+/*
+ * Maps a range within one domain to a local address range. Mappings
+ * must be unmapped with xenforeignmemory_unmap and should follow the
+ * same rules as mmap regarding page alignment.
+ *
+ * prot is as for mmap(2).
+ *
+ * @arr is an array of @pages gfns to be mapped linearly in the local
+ * address range. @err is an (optional) output array used to report
+ * per-page errors, as errno values.
+ *
+ * If @err is given (is non-NULL) then the mapping may partially
+ * succeed and return a valid pointer while also using @err to
+ * indicate the success (0) or failure (errno value) of the individual
+ * pages. The global errno thread local variable is not valid in this
+ * case.
+ *
+ * If @err is not given (is NULL) then on failure to map any page any
+ * successful mappings will be undone and NULL will be returned. errno
+ * will be set to correspond to the first failure (which may not be
+ * the most critical).
+ *
+ * It is also possible to return NULL due to a complete failure,
+ * i.e. failure to even attempt the mapping, in this case the global
+ * errno will have been set and the contents of @err (if given) is
+ * invalid.
+ *
+ * Note that it is also possible to return non-NULL with the contents
+ * of @err indicating failure to map every page.
+ */
+void *xenforeignmemory_map(xenforeignmemory_handle *fmem, uint32_t dom,
+ int prot, size_t pages,
+ const xen_pfn_t arr[/*pages*/], int err[/*pages*/]);
+
+/*
+ * Almost like the previous one but also accepts two additional parameters:
+ *
+ * @addr is used as a hint address for foreign map placement (see mmap(2)).
+ * @flags is a set of additional flags as for mmap(2). Not all of the flag
+ * combinations are possible due to implementation details on different
+ * platforms.
+ */
+void *xenforeignmemory_map2(xenforeignmemory_handle *fmem, uint32_t dom,
+ void *addr, int prot, int flags, size_t pages,
+ const xen_pfn_t arr[/*pages*/], int err[/*pages*/]);
+
+/*
+ * Unmap a mapping previous created with xenforeignmemory_map().
+ *
+ * Returns 0 on success; on failure sets errno and returns -1.
+ */
+int xenforeignmemory_unmap(xenforeignmemory_handle *fmem,
+ void *addr, size_t pages);
+
+/**
+ * This function restricts the use of this handle to the specified
+ * domain.
+ *
+ * @parm fmem handle to the open foreignmemory interface
+ * @parm domid the domain id
+ * @return 0 on success, -1 on failure.
+ */
+int xenforeignmemory_restrict(xenforeignmemory_handle *fmem,
+ domid_t domid);
+
+typedef struct xenforeignmemory_resource_handle xenforeignmemory_resource_handle;
+
+/**
+ * This function maps a guest resource.
+ *
+ * @parm fmem handle to the open foreignmemory interface
+ * @parm domid the domain id
+ * @parm type the resource type
+ * @parm id the type-specific resource identifier
+ * @parm frame base frame index within the resource
+ * @parm nr_frames number of frames to map
+ * @parm paddr pointer to an address passed through to mmap(2)
+ * @parm prot passed through to mmap(2)
+ * @parm flags POSIX-only flags passed through to mmap(2)
+ * @return pointer to foreignmemory resource handle on success, NULL on
+ * failure
+ *
+ * *paddr is used, on entry, as a hint address for foreign map placement
+ * (see mmap(2)) so should be set to NULL if no specific placement is
+ * required. On return *paddr contains the address where the resource is
+ * mapped.
+ * As for xenforeignmemory_map2() flags is a set of additional flags
+ * for mmap(2). Not all of the flag combinations are possible due to
+ * implementation details on different platforms.
+ */
+xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
+ xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
+ unsigned int id, unsigned long frame, unsigned long nr_frames,
+ void **paddr, int prot, int flags);
+
+/**
+ * This function releases a previously acquired resource.
+ *
+ * @parm fmem handle to the open foreignmemory interface
+ * @parm fres handle to the acquired resource
+ *
+ * Returns 0 on success; on failure sets errno and returns -1.
+ */
+int xenforeignmemory_unmap_resource(
+ xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres);
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Split off from:
+ * xenctrl.h
+ *
+ * A library for low-level access to the Xen control interfaces.
+ *
+ * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
+ * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+#ifndef XENGNTTAB_H
+#define XENGNTTAB_H
+
+#include <stdint.h>
+
+#include <xen/grant_table.h>
+#include <xen/event_channel.h>
+
+/* Callers who don't care don't need to #include <xentoollog.h> */
+struct xentoollog_logger;
+
+/*
+ * PRODUCING AND CONSUMING GRANT REFERENCES
+ * ========================================
+ *
+ * The xengnttab library contains two distinct interfaces, each with
+ * their own distinct handle type and entry points. They represent the
+ * two sides of the grant table interface, producer (gntshr) and
+ * consumer (gnttab).
+ *
+ * The xengnttab_* interfaces take a xengnttab_handle and provide
+ * mechanisms for consuming (i.e. mapping or copying to/from) grant
+ * references provided by a peer.
+ *
+ * The xengntshr_* interfaces take a xengntshr_handle and provide a
+ * mechanism to produce grantable memory and grant references to that
+ * memory, which can be handed to some peer.
+ *
+ * UNMAP NOTIFICATION
+ * ==================
+ *
+ * The xengnt{tab,shr}_*_notify interfaces implement a cooperative
+ * interface which is intended to allow the underlying kernel
+ * interfaces to attempt to notify the peer to perform graceful
+ * teardown upon failure (i.e. crash or exit) of the process on their
+ * end.
+ *
+ * These interfaces operate on a single page only and are intended for
+ * use on the main shared-ring page of a protocol. It is assumed that
+ * on teardown both ends would automatically teardown all grants
+ * associated with the protocol in addition to the shared ring itself.
+ *
+ * Each end is able to optionally nominate a byte offset within the
+ * shared page or an event channel or both. On exit of the process the
+ * underlying kernel driver will zero the byte at the given offset and
+ * signal the event channel.
+ *
+ * The event channel can be the same event channel used for regular
+ * ring progress notifications, or may be a dedicated event channel.
+ *
+ * Both ends may share the same notification byte offset within the
+ * shared page, or may have dedicated "client" and "server" status
+ * bytes.
+ *
+ * Since the byte is cleared on shutdown the protocol must use 0 as
+ * the "closed/dead" status, but is permitted to use any other non-0
+ * values to indicate various other "live" states (waiting for
+ * connection, connected, etc).
+ *
+ * Both ends are permitted to modify (including clear) their
+ * respective status bytes and to signal the event channel themselves
+ * from userspace.
+ *
+ * Depending on the mechanisms which have been registered,
+ * the peer may receive a shutdown notification as:
+ *
+ * - An event channel notification on a dedicated event channel
+ * - Observation of the other end's status byte being cleared
+ * (whether in response to an explicit notification or in the
+ * course of normal operation).
+ *
+ * The mechanism should be defined as part of the specific ring
+ * protocol.
+ *
+ * Upon receiving a notification the peer is expected to tear down any
+ * resources (and in particular any grant mappings) in a timely
+ * manner.
+ *
+ * NOTE: this protocol is intended to allow for better error behaviour
+ * and recovery between two cooperating peers. It does not cover the
+ * case of a malicious peer who may continue to hold resources open.
+ */
+
+/*
+ * Grant Table Interface (making use of grants from other domains)
+ */
+
+typedef struct xengntdev_handle xengnttab_handle;
+
+/*
+ * Returns a handle onto the grant table driver. Logs errors.
+ *
+ * Note: After fork(2) a child process must not use any opened gnttab
+ * handle inherited from their parent, nor access any grant mapped
+ * areas associated with that handle.
+ *
+ * The child must open a new handle if they want to interact with
+ * gnttab.
+ *
+ * Calling exec(2) in a child will safely (and reliably) reclaim any
+ * resources which were allocated via a xengnttab_handle in the parent.
+ *
+ * A child which does not call exec(2) may safely call
+ * xengnttab_close() on a xengnttab_handle inherited from their
+ * parent. This will attempt to reclaim any resources associated with
+ * that handle. Note that in some implementations this reclamation may
+ * not be completely effective, in this case any affected resources
+ * remain allocated.
+ *
+ * Calling xengnttab_close() is the only safe operation on a
+ * xengnttab_handle which has been inherited. xengnttab_unmap() must
+ * not be called under such circumstances.
+ */
+xengnttab_handle *xengnttab_open(struct xentoollog_logger *logger,
+ unsigned open_flags);
+
+/*
+ * Close a handle previously allocated with xengnttab_open(),
+ * including unmapping any current grant maps. Never logs errors.
+ *
+ * Under normal circumstances (i.e. not in the child after a fork)
+ * xengnttab_unmap() should be used on all mappings allocated through
+ * a xengnttab_handle prior to closing the handle in order to free up
+ * resources associated with those mappings.
+ *
+ * This is the only function which may be safely called on a
+ * xengnttab_handle in a child after a fork.
+ */
+int xengnttab_close(xengnttab_handle *xgt);
+
+
+/*
+ * Return the fd used internally by xengnttab. selecting on it is not
+ * useful. But it could be useful for unusual use cases; perhaps,
+ * passing to other programs, calling ioctls on directly, or maybe
+ * calling fcntl.
+ */
+int xengnttab_fd(xengnttab_handle *xgt);
+
+/**
+ * Memory maps a grant reference from one domain to a local address range.
+ * Mappings should be unmapped with xengnttab_unmap. Logs errors.
+ *
+ * @parm xgt a handle on an open grant table interface
+ * @parm domid the domain to map memory from
+ * @parm ref the grant reference ID to map
+ * @parm prot same flag as in mmap()
+ */
+void *xengnttab_map_grant_ref(xengnttab_handle *xgt,
+ uint32_t domid,
+ uint32_t ref,
+ int prot);
+
+/**
+ * Memory maps one or more grant references from one or more domains to a
+ * contiguous local address range. Mappings should be unmapped with
+ * xengnttab_unmap. Logs errors.
+ *
+ * On failure (including partial failure) sets errno and returns
+ * NULL. On partial failure no mappings are established (any partial
+ * work is undone).
+ *
+ * @parm xgt a handle on an open grant table interface
+ * @parm count the number of grant references to be mapped
+ * @parm domids an array of @count domain IDs by which the corresponding @refs
+ * were granted
+ * @parm refs an array of @count grant references to be mapped
+ * @parm prot same flag as in mmap()
+ */
+void *xengnttab_map_grant_refs(xengnttab_handle *xgt,
+ uint32_t count,
+ uint32_t *domids,
+ uint32_t *refs,
+ int prot);
+
+/**
+ * Memory maps one or more grant references from one domain to a
+ * contiguous local address range. Mappings should be unmapped with
+ * xengnttab_unmap. Logs errors.
+ *
+ * This call is equivalent to calling @xengnttab_map_grant_refs with a
+ * @domids array with every entry set to @domid.
+ *
+ * @parm xgt a handle on an open grant table interface
+ * @parm count the number of grant references to be mapped
+ * @parm domid the domain to map memory from
+ * @parm refs an array of @count grant references to be mapped
+ * @parm prot same flag as in mmap()
+ */
+void *xengnttab_map_domain_grant_refs(xengnttab_handle *xgt,
+ uint32_t count,
+ uint32_t domid,
+ uint32_t *refs,
+ int prot);
+
+/**
+ * Memory maps a grant reference from one domain to a local address range.
+ * Mappings should be unmapped with xengnttab_unmap. If notify_offset or
+ * notify_port are not -1, this version will attempt to set up an unmap
+ * notification at the given offset and event channel. When the page is
+ * unmapped, the byte at the given offset will be zeroed and a wakeup will be
+ * sent to the given event channel. Logs errors.
+ *
+ * On failure sets errno and returns NULL.
+ *
+ * If notify_offset or notify_port are requested and cannot be set up
+ * an error will be returned and no mapping will be made.
+ *
+ * @parm xgt a handle on an open grant table interface
+ * @parm domid the domain to map memory from
+ * @parm ref the grant reference ID to map
+ * @parm prot same flag as in mmap()
+ * @parm notify_offset The byte offset in the page to use for unmap
+ * notification; -1 for none.
+ * @parm notify_port The event channel port to use for unmap notify, or -1
+ */
+void *xengnttab_map_grant_ref_notify(xengnttab_handle *xgt,
+ uint32_t domid,
+ uint32_t ref,
+ int prot,
+ uint32_t notify_offset,
+ evtchn_port_t notify_port);
+
+/**
+ * Unmaps the @count pages starting at @start_address, which were
+ * mapped by a call to xengnttab_map_grant_ref,
+ * xengnttab_map_grant_refs or xengnttab_map_grant_ref_notify. Never
+ * logs.
+ *
+ * If the mapping was made using xengnttab_map_grant_ref_notify() with
+ * either notify_offset or notify_port then the peer will be notified.
+ */
+int xengnttab_unmap(xengnttab_handle *xgt, void *start_address, uint32_t count);
+
+/**
+ * Sets the maximum number of grants that may be mapped by the given
+ * instance to @count. Never logs.
+ *
+ * N.B. This function must be called after opening the handle, and before any
+ * other functions are invoked on it.
+ *
+ * N.B. When variable-length grants are mapped, fragmentation may be observed,
+ * and it may not be possible to satisfy requests up to the maximum number
+ * of grants.
+ */
+int xengnttab_set_max_grants(xengnttab_handle *xgt,
+ uint32_t nr_grants);
+
+struct xengnttab_grant_copy_segment {
+ union xengnttab_copy_ptr {
+ void *virt;
+ struct {
+ uint32_t ref;
+ uint16_t offset;
+ uint16_t domid;
+ } foreign;
+ } source, dest;
+ uint16_t len;
+ uint16_t flags;
+ int16_t status;
+};
+
+typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;
+
+/**
+ * Copy memory from or to grant references. The information for each operation
+ * is contained in 'xengnttab_grant_copy_segment_t'. The @flag value indicates
+ * the direction of an operation (GNTCOPY_source_gref/GNTCOPY_dest_gref).
+ *
+ * For each segment, @virt may cross a page boundary but @offset + @len
+ * must not exceed XEN_PAGE_SIZE.
+ */
+int xengnttab_grant_copy(xengnttab_handle *xgt,
+ uint32_t count,
+ xengnttab_grant_copy_segment_t *segs);
+
+/*
+ * Flags to be used while requesting memory mapping's backing storage
+ * to be allocated with DMA API.
+ */
+
+/*
+ * The buffer is backed with memory allocated with dma_alloc_wc.
+ */
+#define GNTDEV_DMA_FLAG_WC (1 << 0)
+
+/*
+ * The buffer is backed with memory allocated with dma_alloc_coherent.
+ */
+#define GNTDEV_DMA_FLAG_COHERENT (1 << 1)
+
+/**
+ * Create a dma-buf [1] from grant references @refs of count @count provided
+ * by the foreign domain @domid with flags @flags.
+ *
+ * By default dma-buf is backed by system memory pages, but by providing
+ * one of the GNTDEV_DMA_FLAG_XXX flags it can also be created as
+ * a DMA write-combine or coherent buffer.
+ *
+ * Returns 0 if dma-buf was successfully created and the corresponding
+ * dma-buf's file descriptor is returned in @fd.
+ *
+ * [1] https://elixir.bootlin.com/linux/latest/source/Documentation/driver-api/dma-buf.rst
+ */
+int xengnttab_dmabuf_exp_from_refs(xengnttab_handle *xgt, uint32_t domid,
+ uint32_t flags, uint32_t count,
+ const uint32_t *refs, uint32_t *fd);
+
+/*
+ * This will block until the dma-buf with the file descriptor @fd is
+ * released. This is only valid for buffers created with
+ * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS.
+ *
+ * If within @wait_to_ms milliseconds the buffer is not released
+ * then -ETIMEDOUT error is returned.
+ * If the buffer with file descriptor @fd does not exist or has already
+ * been released, then -ENOENT is returned. For valid file descriptors
+ * this must not be treated as error.
+ */
+int xengnttab_dmabuf_exp_wait_released(xengnttab_handle *xgt, uint32_t fd,
+ uint32_t wait_to_ms);
+
+/*
+ * Import a dma-buf with file descriptor @fd and export granted references
+ * to the pages of that dma-buf into array @refs of size @count.
+ */
+int xengnttab_dmabuf_imp_to_refs(xengnttab_handle *xgt, uint32_t domid,
+ uint32_t fd, uint32_t count, uint32_t *refs);
+
+/*
+ * This will close all references to an imported buffer, so it can be
+ * released by the owner. This is only valid for buffers created with
+ * IOCTL_GNTDEV_DMABUF_IMP_TO_REFS.
+ */
+int xengnttab_dmabuf_imp_release(xengnttab_handle *xgt, uint32_t fd);
+
+/*
+ * Grant Sharing Interface (allocating and granting pages to others)
+ */
+
+typedef struct xengntdev_handle xengntshr_handle;
+
+/*
+ * Returns a handle onto the grant sharing driver. Logs errors.
+ *
+ * Note: After fork(2) a child process must not use any opened gntshr
+ * handle inherited from their parent, nor access any grant mapped
+ * areas associated with that handle.
+ *
+ * The child must open a new handle if they want to interact with
+ * gntshr.
+ *
+ * Calling exec(2) in a child will safely (and reliably) reclaim any
+ * resources which were allocated via a xengntshr_handle in the
+ * parent.
+ *
+ * A child which does not call exec(2) may safely call
+ * xengntshr_close() on a xengntshr_handle inherited from their
+ * parent. This will attempt to reclaim any resources associated with
+ * that handle. Note that in some implementations this reclamation may
+ * not be completely effective, in this case any affected resources
+ * remain allocated.
+ *
+ * Calling xengntshr_close() is the only safe operation on a
+ * xengntshr_handle which has been inherited.
+ */
+xengntshr_handle *xengntshr_open(struct xentoollog_logger *logger,
+ unsigned open_flags);
+
+/*
+ * Close a handle previously allocated with xengntshr_open().
+ * Never logs errors.
+ *
+ * Under normal circumstances (i.e. not in the child after a fork)
+ * xengntshr_unmap() should be used on all mappings allocated through
+ * a xengnttab_handle prior to closing the handle in order to free up
+ * resources associated with those mappings.
+ *
+ * xengntshr_close() is the only function which may be safely called
+ * on a xengntshr_handle in a child after a fork. xengntshr_unshare()
+ * must not be called under such circumstances.
+ */
+int xengntshr_close(xengntshr_handle *xgs);
+
+/*
+ * Return the fd used internally by xengntshr. selecting on it is not
+ * useful. But it could be useful for unusual use cases; perhaps,
+ * passing to other programs, calling ioctls on directly, or maybe
+ * calling fcntl.
+ */
+int xengntshr_fd(xengntshr_handle *xgs);
+
+/**
+ * Allocates and shares pages with another domain.
+ *
+ * On failure sets errno and returns NULL. No allocations will be made.
+ *
+ * This library only provides functionality for sharing memory
+ * allocated via this call, memory from elsewhere (malloc, mmap etc)
+ * cannot be shared here.
+ *
+ * @parm xgs a handle to an open grant sharing instance
+ * @parm domid the domain to share memory with
+ * @parm count the number of pages to share
+ * @parm refs the grant references of the pages (output)
+ * @parm writable true if the other domain can write to the pages
+ * @return local mapping of the pages
+ */
+void *xengntshr_share_pages(xengntshr_handle *xgs, uint32_t domid,
+ int count, uint32_t *refs, int writable);
+
+/**
+ * Creates and shares a page with another domain, with unmap notification.
+ *
+ * @parm xgs a handle to an open grant sharing instance
+ * @parm domid the domain to share memory with
+ * @parm ref the grant reference of the page (output)
+ * @parm writable true if the other domain can write to the page
+ * @parm notify_offset The byte offset in the page to use for unmap
+ * notification; -1 for none.
+ * @parm notify_port The event channel port to use for unmap notify, or -1
+ * @return local mapping of the page
+ */
+void *xengntshr_share_page_notify(xengntshr_handle *xgs, uint32_t domid,
+ uint32_t *ref, int writable,
+ uint32_t notify_offset,
+ evtchn_port_t notify_port);
+
+/**
+ * Unmaps the @count pages starting at @start_address, which were
+ * mapped by a call to xengntshr_share_*. Never logs.
+ *
+ * If the mapping was made using xengntshr_share_page_notify() with
+ * either notify_offset or notify_port then the peer will be notified.
+ */
+int xengntshr_unshare(xengntshr_handle *xgs, void *start_address, uint32_t count);
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/******************************************************************************
+ * xenguest.h
+ *
+ * A library for guest domain management in Xen.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XENGUEST_H
+#define XENGUEST_H
+
+#define XC_NUMA_NO_NODE (~0U)
+
+#define XCFLAGS_LIVE (1 << 0)
+#define XCFLAGS_DEBUG (1 << 1)
+
+#define X86_64_B_SIZE 64
+#define X86_32_B_SIZE 32
+
+#define X86_HVM_NR_SPECIAL_PAGES 8
+#define X86_HVM_END_SPECIAL_REGION 0xff000u
+#define XG_MAX_MODULES 2
+
+/* --- typedefs and structs ---------------------------------------- */
+
+typedef uint64_t xen_vaddr_t;
+typedef uint64_t xen_paddr_t;
+
+#define PRIpfn PRI_xen_pfn
+
+struct xc_dom_seg {
+ xen_vaddr_t vstart;
+ xen_vaddr_t vend;
+ xen_pfn_t pfn;
+ xen_pfn_t pages;
+};
+
+struct xc_hvm_firmware_module {
+ uint8_t *data;
+ uint32_t length;
+ uint64_t guest_addr_out;
+};
+
+struct xc_dom_mem {
+ struct xc_dom_mem *next;
+ void *ptr;
+ enum {
+ XC_DOM_MEM_TYPE_MALLOC_INTERNAL,
+ XC_DOM_MEM_TYPE_MALLOC_EXTERNAL,
+ XC_DOM_MEM_TYPE_MMAP,
+ } type;
+ size_t len;
+ unsigned char memory[0];
+};
+
+struct xc_dom_phys {
+ struct xc_dom_phys *next;
+ void *ptr;
+ xen_pfn_t first;
+ xen_pfn_t count;
+};
+
+struct xc_dom_module {
+ void *blob;
+ size_t size;
+ void *cmdline;
+ /* If seg.vstart is non zero then the module will be loaded at that
+ * address, otherwise it will be automatically placed.
+ *
+ * If automatic placement is used and the module is gzip
+ * compressed then it will be decompressed as it is loaded. If the
+ * module has been explicitly placed then it is loaded as is
+ * otherwise decompressing risks undoing the manual placement.
+ */
+ struct xc_dom_seg seg;
+};
+
+struct xc_dom_image {
+ /* files */
+ void *kernel_blob;
+ size_t kernel_size;
+ unsigned int num_modules;
+ struct xc_dom_module modules[XG_MAX_MODULES];
+ void *devicetree_blob;
+ size_t devicetree_size;
+
+ size_t max_kernel_size;
+ size_t max_module_size;
+ size_t max_devicetree_size;
+
+ /* arguments and parameters */
+ char *cmdline;
+ size_t cmdline_size;
+ uint32_t f_requested[XENFEAT_NR_SUBMAPS];
+
+ /* info from (elf) kernel image */
+ struct elf_dom_parms *parms;
+ char *guest_type;
+
+ /* memory layout */
+ struct xc_dom_seg kernel_seg;
+ struct xc_dom_seg p2m_seg;
+ struct xc_dom_seg pgtables_seg;
+ struct xc_dom_seg devicetree_seg;
+ struct xc_dom_seg start_info_seg;
+ xen_pfn_t start_info_pfn;
+ xen_pfn_t console_pfn;
+ xen_pfn_t xenstore_pfn;
+ xen_pfn_t shared_info_pfn;
+ xen_pfn_t bootstack_pfn;
+ xen_pfn_t pfn_alloc_end;
+ xen_vaddr_t virt_alloc_end;
+ xen_vaddr_t bsd_symtab_start;
+
+ /*
+ * initrd parameters as specified in start_info page
+ * Depending on capabilities of the booted kernel this may be a virtual
+ * address or a pfn. Type is neutral and large enough to hold a virtual
+ * address of a 64 bit kernel even with 32 bit toolstack.
+ */
+ uint64_t initrd_start;
+ uint64_t initrd_len;
+
+ unsigned int alloc_bootstack;
+ xen_vaddr_t virt_pgtab_end;
+
+ /* other state info */
+ uint32_t f_active[XENFEAT_NR_SUBMAPS];
+
+ /*
+ * pv_p2m is specific to x86 PV guests, and maps GFNs to MFNs. It is
+ * eventually copied into guest context.
+ */
+ xen_pfn_t *pv_p2m;
+
+ /* physical memory
+ *
+ * An x86 PV guest has one or more blocks of physical RAM,
+ * consisting of total_pages starting at 0. The start address and
+ * size of each block is controlled by vNUMA structures.
+ *
+ * An ARM guest has GUEST_RAM_BANKS regions of RAM, with
+ * rambank_size[i] pages in each. The lowest RAM address
+ * (corresponding to the base of the p2m arrays above) is stored
+ * in rambase_pfn.
+ */
+ xen_pfn_t rambase_pfn;
+ xen_pfn_t total_pages;
+ xen_pfn_t p2m_size; /* number of pfns covered by p2m */
+ struct xc_dom_phys *phys_pages;
+#if defined (__arm__) || defined(__aarch64__)
+ xen_pfn_t rambank_size[GUEST_RAM_BANKS];
+#endif
+
+ /* malloc memory pool */
+ struct xc_dom_mem *memblocks;
+
+ /* memory footprint stats */
+ size_t alloc_malloc;
+ size_t alloc_mem_map;
+ size_t alloc_file_map;
+ size_t alloc_domU_map;
+
+ /* misc xen domain config stuff */
+ unsigned long flags;
+ unsigned int console_evtchn;
+ unsigned int xenstore_evtchn;
+ uint32_t console_domid;
+ uint32_t xenstore_domid;
+ xen_pfn_t shared_info_mfn;
+
+ xc_interface *xch;
+ uint32_t guest_domid;
+ int claim_enabled; /* 0 by default, 1 enables it */
+
+ int xen_version;
+ xen_capabilities_info_t xen_caps;
+
+ /* kernel loader, arch hooks */
+ struct xc_dom_loader *kernel_loader;
+ void *private_loader;
+
+ /* vNUMA information */
+ xen_vmemrange_t *vmemranges;
+ unsigned int nr_vmemranges;
+ unsigned int *vnode_to_pnode;
+ unsigned int nr_vnodes;
+
+ /* domain type/architecture specific data */
+ void *arch_private;
+
+ /* kernel loader */
+ struct xc_dom_arch *arch_hooks;
+ /* allocate up to pfn_alloc_end */
+ int (*allocate) (struct xc_dom_image * dom);
+
+ /* Container type (HVM or PV). */
+ enum {
+ XC_DOM_PV_CONTAINER,
+ XC_DOM_HVM_CONTAINER,
+ } container_type;
+
+ /* HVM specific fields. */
+ xen_pfn_t target_pages;
+ xen_paddr_t mmio_start;
+ xen_paddr_t mmio_size;
+ xen_paddr_t lowmem_end;
+ xen_paddr_t highmem_end;
+ xen_pfn_t vga_hole_size;
+
+ /* If unset disables the setup of the IOREQ pages. */
+ bool device_model;
+
+ /* BIOS/Firmware passed to HVMLOADER */
+ struct xc_hvm_firmware_module system_firmware_module;
+
+ /* Extra ACPI tables */
+#define MAX_ACPI_MODULES 4
+ struct xc_hvm_firmware_module acpi_modules[MAX_ACPI_MODULES];
+
+ /* Extra SMBIOS structures passed to HVMLOADER */
+ struct xc_hvm_firmware_module smbios_module;
+
+#if defined(__i386__) || defined(__x86_64__)
+ struct e820entry *e820;
+ unsigned int e820_entries;
+#endif
+
+ xen_pfn_t vuart_gfn;
+
+ /* Number of vCPUs */
+ unsigned int max_vcpus;
+};
+
+/* --- arch specific hooks ----------------------------------------- */
+
+struct xc_dom_arch {
+ int (*alloc_magic_pages) (struct xc_dom_image * dom);
+
+ /* pagetable setup - x86 PV only */
+ int (*alloc_pgtables) (struct xc_dom_image * dom);
+ int (*alloc_p2m_list) (struct xc_dom_image * dom);
+ int (*setup_pgtables) (struct xc_dom_image * dom);
+
+ /* arch-specific data structs setup */
+ /* in Mini-OS environment start_info might be a macro, avoid collision. */
+#undef start_info
+ int (*start_info) (struct xc_dom_image * dom);
+ int (*shared_info) (struct xc_dom_image * dom, void *shared_info);
+ int (*vcpu) (struct xc_dom_image * dom);
+ int (*bootearly) (struct xc_dom_image * dom);
+ int (*bootlate) (struct xc_dom_image * dom);
+
+ /* arch-specific memory initialization. */
+ int (*meminit) (struct xc_dom_image * dom);
+
+ char *guest_type;
+ char *native_protocol;
+ int page_shift;
+ int sizeof_pfn;
+ int p2m_base_supported;
+ int arch_private_size;
+
+ struct xc_dom_arch *next;
+};
+void xc_dom_register_arch_hooks(struct xc_dom_arch *hooks);
+
+#define XC_DOM_PAGE_SHIFT(dom) ((dom)->arch_hooks->page_shift)
+#define XC_DOM_PAGE_SIZE(dom) (1LL << (dom)->arch_hooks->page_shift)
+
+/* --- main functions ---------------------------------------------- */
+
+struct xc_dom_image *xc_dom_allocate(xc_interface *xch,
+ const char *cmdline, const char *features);
+void xc_dom_release_phys(struct xc_dom_image *dom);
+void xc_dom_release(struct xc_dom_image *dom);
+int xc_dom_rambase_init(struct xc_dom_image *dom, uint64_t rambase);
+int xc_dom_mem_init(struct xc_dom_image *dom, unsigned int mem_mb);
+
+/* Set this larger if you have enormous modules/kernels. Note that
+ * you should trust all kernels not to be maliciously large (e.g. to
+ * exhaust all dom0 memory) if you do this (see CVE-2012-4544 /
+ * XSA-25). You can also set the default independently for
+ * modules/kernels in xc_dom_allocate() or call
+ * xc_dom_{kernel,module}_max_size.
+ */
+#ifndef XC_DOM_DECOMPRESS_MAX
+#define XC_DOM_DECOMPRESS_MAX (1024*1024*1024) /* 1GB */
+#endif
+
+int xc_dom_kernel_check_size(struct xc_dom_image *dom, size_t sz);
+int xc_dom_kernel_max_size(struct xc_dom_image *dom, size_t sz);
+
+int xc_dom_module_max_size(struct xc_dom_image *dom, size_t sz);
+
+int xc_dom_devicetree_max_size(struct xc_dom_image *dom, size_t sz);
+
+size_t xc_dom_check_gzip(xc_interface *xch,
+ void *blob, size_t ziplen);
+int xc_dom_do_gunzip(xc_interface *xch,
+ void *src, size_t srclen, void *dst, size_t dstlen);
+int xc_dom_try_gunzip(struct xc_dom_image *dom, void **blob, size_t * size);
+
+int xc_dom_kernel_file(struct xc_dom_image *dom, const char *filename);
+int xc_dom_module_file(struct xc_dom_image *dom, const char *filename,
+ const char *cmdline);
+int xc_dom_kernel_mem(struct xc_dom_image *dom, const void *mem,
+ size_t memsize);
+int xc_dom_module_mem(struct xc_dom_image *dom, const void *mem,
+ size_t memsize, const char *cmdline);
+int xc_dom_devicetree_file(struct xc_dom_image *dom, const char *filename);
+int xc_dom_devicetree_mem(struct xc_dom_image *dom, const void *mem,
+ size_t memsize);
+
+int xc_dom_parse_image(struct xc_dom_image *dom);
+int xc_dom_set_arch_hooks(struct xc_dom_image *dom);
+int xc_dom_build_image(struct xc_dom_image *dom);
+
+int xc_dom_boot_xen_init(struct xc_dom_image *dom, xc_interface *xch,
+ uint32_t domid);
+int xc_dom_boot_mem_init(struct xc_dom_image *dom);
+void *xc_dom_boot_domU_map(struct xc_dom_image *dom, xen_pfn_t pfn,
+ xen_pfn_t count);
+int xc_dom_boot_image(struct xc_dom_image *dom);
+int xc_dom_compat_check(struct xc_dom_image *dom);
+int xc_dom_gnttab_init(struct xc_dom_image *dom);
+int xc_dom_gnttab_seed(xc_interface *xch, uint32_t guest_domid,
+ bool is_hvm,
+ xen_pfn_t console_gfn,
+ xen_pfn_t xenstore_gfn,
+ uint32_t console_domid,
+ uint32_t xenstore_domid);
+bool xc_dom_translated(const struct xc_dom_image *dom);
+
+/* --- debugging bits ---------------------------------------------- */
+
+int xc_dom_loginit(xc_interface *xch);
+
+void xc_dom_printf(xc_interface *xch, const char *fmt, ...)
+ __attribute__ ((format(printf, 2, 3)));
+void xc_dom_panic_func(xc_interface *xch,
+ const char *file, int line, xc_error_code err,
+ const char *fmt, ...)
+ __attribute__ ((format(printf, 5, 6)));
+
+#define xc_dom_panic(xch, err, fmt, args...) \
+ xc_dom_panic_func(xch, __FILE__, __LINE__, err, fmt, ## args)
+#define xc_dom_trace(mark) \
+ xc_dom_printf("%s:%d: trace %s\n", __FILE__, __LINE__, mark)
+
+void xc_dom_log_memory_footprint(struct xc_dom_image *dom);
+
+/* --- simple memory pool ------------------------------------------ */
+
+void *xc_dom_malloc(struct xc_dom_image *dom, size_t size);
+int xc_dom_register_external(struct xc_dom_image *dom, void *ptr, size_t size);
+void *xc_dom_malloc_page_aligned(struct xc_dom_image *dom, size_t size);
+void *xc_dom_malloc_filemap(struct xc_dom_image *dom,
+ const char *filename, size_t * size,
+ const size_t max_size);
+char *xc_dom_strdup(struct xc_dom_image *dom, const char *str);
+
+/* --- alloc memory pool ------------------------------------------- */
+
+xen_pfn_t xc_dom_alloc_page(struct xc_dom_image *dom, char *name);
+int xc_dom_alloc_segment(struct xc_dom_image *dom,
+ struct xc_dom_seg *seg, char *name,
+ xen_vaddr_t start, xen_vaddr_t size);
+
+/* --- misc bits --------------------------------------------------- */
+
+void *xc_dom_pfn_to_ptr(struct xc_dom_image *dom, xen_pfn_t first,
+ xen_pfn_t count);
+void *xc_dom_pfn_to_ptr_retcount(struct xc_dom_image *dom, xen_pfn_t first,
+ xen_pfn_t count, xen_pfn_t *count_out);
+void xc_dom_unmap_one(struct xc_dom_image *dom, xen_pfn_t pfn);
+void xc_dom_unmap_all(struct xc_dom_image *dom);
+void *xc_dom_vaddr_to_ptr(struct xc_dom_image *dom,
+ xen_vaddr_t vaddr, size_t *safe_region_out);
+uint64_t xc_dom_virt_base(struct xc_dom_image *dom);
+uint64_t xc_dom_virt_entry(struct xc_dom_image *dom);
+uint64_t xc_dom_virt_hypercall(struct xc_dom_image *dom);
+char *xc_dom_guest_os(struct xc_dom_image *dom);
+bool xc_dom_feature_get(struct xc_dom_image *dom, unsigned int nr);
+
+static inline void *xc_dom_seg_to_ptr_pages(struct xc_dom_image *dom,
+ struct xc_dom_seg *seg,
+ xen_pfn_t *pages_out)
+{
+ void *retval;
+
+ retval = xc_dom_pfn_to_ptr(dom, seg->pfn, seg->pages);
+
+ *pages_out = retval ? seg->pages : 0;
+ return retval;
+}
+
+static inline void *xc_dom_seg_to_ptr(struct xc_dom_image *dom,
+ struct xc_dom_seg *seg)
+{
+ xen_pfn_t dummy;
+
+ return xc_dom_seg_to_ptr_pages(dom, seg, &dummy);
+}
+
+static inline xen_pfn_t xc_dom_p2m(struct xc_dom_image *dom, xen_pfn_t pfn)
+{
+ if ( xc_dom_translated(dom) )
+ return pfn;
+
+ /* x86 PV only now. */
+ if ( pfn >= dom->total_pages )
+ return INVALID_MFN;
+
+ return dom->pv_p2m[pfn];
+}
+
+/*
+ * Users not using xc_suspend_* / xc_await_suspend may not want to
+ * include the full libxenevtchn API here.
+ */
+struct xenevtchn_handle;
+
+/* For save's precopy_policy(). */
+struct precopy_stats
+{
+ unsigned int iteration;
+ unsigned int total_written;
+ long dirty_count; /* -1 if unknown */
+};
+
+/*
+ * A precopy_policy callback may not be running in the same address
+ * space as libxc and so precopy_stats is passed by value.
+ */
+typedef int (*precopy_policy_t)(struct precopy_stats, void *);
+
+/* callbacks provided by xc_domain_save */
+struct save_callbacks {
+ /*
+ * Called after expiration of checkpoint interval,
+ * to suspend the guest.
+ */
+ int (*suspend)(void *data);
+
+ /*
+ * Called before and after every batch of page data sent during
+ * the precopy phase of a live migration to ask the caller what
+ * to do next based on the current state of the precopy migration.
+ *
+ * Should return one of the values listed below:
+ */
+#define XGS_POLICY_ABORT (-1) /* Abandon the migration entirely
+ * and tidy up. */
+#define XGS_POLICY_CONTINUE_PRECOPY 0 /* Remain in the precopy phase. */
+#define XGS_POLICY_STOP_AND_COPY 1 /* Immediately suspend and transmit the
+ * remaining dirty pages. */
+ precopy_policy_t precopy_policy;
+
+ /*
+ * Called after the guest's dirty pages have been
+ * copied into an output buffer.
+ * Callback function resumes the guest & the device model,
+ * returns to xc_domain_save.
+ * xc_domain_save then flushes the output buffer, while the
+ * guest continues to run.
+ */
+ int (*postcopy)(void *data);
+
+ /*
+ * Called after the memory checkpoint has been flushed
+ * out into the network. Typical actions performed in this
+ * callback include:
+ * (a) send the saved device model state (for HVM guests),
+ * (b) wait for checkpoint ack
+ * (c) release the network output buffer pertaining to the acked checkpoint.
+ * (d) sleep for the checkpoint interval.
+ *
+ * returns:
+ * 0: terminate checkpointing gracefully
+ * 1: take another checkpoint
+ */
+ int (*checkpoint)(void *data);
+
+ /*
+ * Called after the checkpoint callback.
+ *
+ * returns:
+ * 0: terminate checkpointing gracefully
+ * 1: take another checkpoint
+ */
+ int (*wait_checkpoint)(void *data);
+
+ /* Enable qemu-dm logging dirty pages to xen */
+ int (*switch_qemu_logdirty)(uint32_t domid, unsigned enable, void *data); /* HVM only */
+
+ /* to be provided as the last argument to each callback function */
+ void *data;
+};
+
+/* Type of stream. Plain, or using a continuous replication protocol? */
+typedef enum {
+ XC_STREAM_PLAIN,
+ XC_STREAM_REMUS,
+ XC_STREAM_COLO,
+} xc_stream_type_t;
+
+/**
+ * This function will save a running domain.
+ *
+ * @param xch a handle to an open hypervisor interface
+ * @param io_fd the file descriptor to save a domain to
+ * @param dom the id of the domain
+ * @param flags XCFLAGS_xxx
+ * @param stream_type XC_STREAM_PLAIN if the far end of the stream
+ * doesn't use checkpointing
+ * @param recv_fd Only used for XC_STREAM_COLO. Contains backchannel from
+ * the destination side.
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
+ uint32_t flags, struct save_callbacks *callbacks,
+ xc_stream_type_t stream_type, int recv_fd);
+
+/* callbacks provided by xc_domain_restore */
+struct restore_callbacks {
+ /*
+ * Called once the STATIC_DATA_END record has been received/inferred.
+ *
+ * For compatibility with older streams, provides a list of static data
+ * expected to be found in the stream, which was missing. A higher level
+ * toolstack is responsible for providing any necessary compatibility.
+ */
+#define XGR_SDD_MISSING_CPUID (1 << 0)
+#define XGR_SDD_MISSING_MSR (1 << 1)
+ int (*static_data_done)(unsigned int missing, void *data);
+
+ /* Called after a new checkpoint to suspend the guest. */
+ int (*suspend)(void *data);
+
+ /*
+ * Called after the secondary vm is ready to resume.
+ * Callback function resumes the guest & the device model,
+ * returns to xc_domain_restore.
+ */
+ int (*postcopy)(void *data);
+
+ /*
+ * A checkpoint record has been found in the stream.
+ * returns:
+ */
+#define XGR_CHECKPOINT_ERROR 0 /* Terminate processing */
+#define XGR_CHECKPOINT_SUCCESS 1 /* Continue reading more data from the stream */
+#define XGR_CHECKPOINT_FAILOVER 2 /* Failover and resume VM */
+ int (*checkpoint)(void *data);
+
+ /*
+ * Called after the checkpoint callback.
+ *
+ * returns:
+ * 0: terminate checkpointing gracefully
+ * 1: take another checkpoint
+ */
+ int (*wait_checkpoint)(void *data);
+
+ /*
+ * callback to send store gfn and console gfn to xl
+ * if we want to resume vm before xc_domain_save()
+ * exits.
+ */
+ void (*restore_results)(xen_pfn_t store_gfn, xen_pfn_t console_gfn,
+ void *data);
+
+ /* to be provided as the last argument to each callback function */
+ void *data;
+};
+
+/**
+ * This function will restore a saved domain.
+ *
+ * Domain is restored in a suspended state ready to be unpaused.
+ *
+ * @param xch a handle to an open hypervisor interface
+ * @param io_fd the file descriptor to restore a domain from
+ * @param dom the id of the domain
+ * @param store_evtchn the xenstore event channel for this domain to use
+ * @param store_mfn filled with the gfn of the store page
+ * @param store_domid the backend domain for xenstore
+ * @param console_evtchn the console event channel for this domain to use
+ * @param console_mfn filled with the gfn of the console page
+ * @param console_domid the backend domain for xenconsole
+ * @param stream_type XC_STREAM_PLAIN if the far end of the stream
+ * doesn't use checkpointing
+ * @param callbacks non-NULL to receive a callback to restore toolstack
+ * specific data
+ * @param send_back_fd Only used for XC_STREAM_COLO. Contains backchannel to
+ * the source side.
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
+ unsigned int store_evtchn, unsigned long *store_mfn,
+ uint32_t store_domid, unsigned int console_evtchn,
+ unsigned long *console_mfn, uint32_t console_domid,
+ xc_stream_type_t stream_type,
+ struct restore_callbacks *callbacks, int send_back_fd);
+
+/**
+ * This function will create a domain for a paravirtualized Linux
+ * using file names pointing to kernel and ramdisk
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the id of the domain
+ * @parm mem_mb memory size in megabytes
+ * @parm image_name name of the kernel image file
+ * @parm ramdisk_name name of the ramdisk image file
+ * @parm cmdline command line string
+ * @parm flags domain creation flags
+ * @parm store_evtchn the store event channel for this domain to use
+ * @parm store_mfn returned with the mfn of the store page
+ * @parm console_evtchn the console event channel for this domain to use
+ * @parm console_mfn returned with the mfn of the console page
+ * @return 0 on success, -1 on failure
+ */
+int xc_linux_build(xc_interface *xch,
+ uint32_t domid,
+ unsigned int mem_mb,
+ const char *image_name,
+ const char *ramdisk_name,
+ const char *cmdline,
+ const char *features,
+ unsigned long flags,
+ unsigned int store_evtchn,
+ unsigned long *store_mfn,
+ unsigned int console_evtchn,
+ unsigned long *console_mfn);
+
+/*
+ * Sets *lockfd to -1.
+ * Has deallocated everything even on error.
+ */
+int xc_suspend_evtchn_release(xc_interface *xch,
+ struct xenevtchn_handle *xce,
+ uint32_t domid, int suspend_evtchn, int *lockfd);
+
+/**
+ * This function eats the initial notification.
+ * xce must not be used for anything else
+ * See xc_suspend_evtchn_init_sane re lockfd.
+ */
+int xc_suspend_evtchn_init_exclusive(xc_interface *xch,
+ struct xenevtchn_handle *xce,
+ uint32_t domid, int port, int *lockfd);
+
+/* xce must not be used for anything else */
+int xc_await_suspend(xc_interface *xch, struct xenevtchn_handle *xce,
+ int suspend_evtchn);
+
+/**
+ * The port will be signaled immediately after this call
+ * The caller should check the domain status and look for the next event
+ * On success, *lockfd will be set to >=0 and *lockfd must be preserved
+ * and fed to xc_suspend_evtchn_release. (On error *lockfd is
+ * undefined and xc_suspend_evtchn_release is not allowed.)
+ */
+int xc_suspend_evtchn_init_sane(xc_interface *xch,
+ struct xenevtchn_handle *xce,
+ uint32_t domid, int port, int *lockfd);
+
+int xc_mark_page_online(xc_interface *xch, unsigned long start,
+ unsigned long end, uint32_t *status);
+
+int xc_mark_page_offline(xc_interface *xch, unsigned long start,
+ unsigned long end, uint32_t *status);
+
+int xc_query_page_offline_status(xc_interface *xch, unsigned long start,
+ unsigned long end, uint32_t *status);
+
+int xc_exchange_page(xc_interface *xch, uint32_t domid, xen_pfn_t mfn);
+
+
+/**
+ * Memory related information, such as PFN types, the P2M table,
+ * the guest word width and the guest page table levels.
+ */
+struct xc_domain_meminfo {
+ unsigned int pt_levels;
+ unsigned int guest_width;
+ xen_pfn_t *pfn_type;
+ xen_pfn_t *p2m_table;
+ unsigned long p2m_size;
+};
+
+int xc_map_domain_meminfo(xc_interface *xch, uint32_t domid,
+ struct xc_domain_meminfo *minfo);
+
+int xc_unmap_domain_meminfo(xc_interface *xch, struct xc_domain_meminfo *mem);
+
+/**
+ * This function maps the m2p table
+ * @parm xch a handle to an open hypervisor interface
+ * @parm max_mfn the max pfn
+ * @parm prot the flags to map, such as read/write etc
+ * @parm mfn0 return the first mfn, can be NULL
+ * @return mapped m2p table on success, NULL on failure
+ */
+xen_pfn_t *xc_map_m2p(xc_interface *xch,
+ unsigned long max_mfn,
+ int prot,
+ unsigned long *mfn0);
+#endif /* XENGUEST_H */
--- /dev/null
+/*
+ * Copyright (c) 2019 SUSE Software Solutions Germany GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef XENHYPFS_H
+#define XENHYPFS_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+/* Callers who don't care don't need to #include <xentoollog.h> */
+struct xentoollog_logger;
+
+typedef struct xenhypfs_handle xenhypfs_handle;
+
+/* Type of a hypfs entry, as reported in struct xenhypfs_dirent.type. */
+enum xenhypfs_type {
+ xenhypfs_type_dir, /* directory */
+ xenhypfs_type_blob, /* binary data */
+ xenhypfs_type_string, /* string */
+ xenhypfs_type_uint, /* unsigned integer */
+ xenhypfs_type_int, /* signed integer */
+ xenhypfs_type_bool, /* boolean */
+};
+
+/* Encoding of a hypfs entry's raw contents. */
+enum xenhypfs_encoding {
+ xenhypfs_enc_plain, /* contents stored as-is */
+ xenhypfs_enc_gzip /* contents gzip-compressed */
+};
+
+/* Directory entry, as returned by xenhypfs_readdir() / xenhypfs_read_raw(). */
+struct xenhypfs_dirent {
+ char *name; /* name of the entry */
+ size_t size; /* size of the entry's contents, in bytes */
+ unsigned short type; /* enum xenhypfs_type */
+ unsigned short encoding; /* enum xenhypfs_encoding */
+ unsigned int flags;
+#define XENHYPFS_FLAG_WRITABLE 0x00000001 /* entry accepts xenhypfs_write() */
+};
+
+xenhypfs_handle *xenhypfs_open(struct xentoollog_logger *logger,
+ unsigned int open_flags);
+int xenhypfs_close(xenhypfs_handle *fshdl);
+
+/*
+ * Return the raw contents of a Xen hypfs entry and its dirent containing
+ * the size, type and encoding.
+ * Returned buffer and dirent should be freed via free().
+ */
+void *xenhypfs_read_raw(xenhypfs_handle *fshdl, const char *path,
+ struct xenhypfs_dirent **dirent);
+
+/*
+ * Return the contents of a Xen hypfs entry as a string.
+ * Returned buffer should be freed via free().
+ */
+char *xenhypfs_read(xenhypfs_handle *fshdl, const char *path);
+
+/*
+ * Return the contents of a Xen hypfs directory in form of an array of
+ * dirents.
+ * Returned buffer should be freed via free().
+ */
+struct xenhypfs_dirent *xenhypfs_readdir(xenhypfs_handle *fshdl,
+ const char *path,
+ unsigned int *num_entries);
+
+/*
+ * Write a Xen hypfs entry with a value. The value is converted from a string
+ * to the appropriate type.
+ */
+int xenhypfs_write(xenhypfs_handle *fshdl, const char *path, const char *val);
+
+#endif /* XENHYPFS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/* libxenstat: statistics-collection library for Xen
+ * Copyright (C) International Business Machines Corp., 2005
+ * Authors: Josh Triplett <josh@kernel.org>
+ * Judy Fischbach <jfisch@cs.pdx.edu>
+ * David Hendricks <cro_marmot@comcast.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ */
+
+/* libxenstat API */
+
+#ifndef XENSTAT_H
+#define XENSTAT_H
+
+#include <stdbool.h>
+
+/* Opaque handles */
+typedef struct xenstat_handle xenstat_handle;
+typedef struct xenstat_domain xenstat_domain;
+typedef struct xenstat_node xenstat_node;
+typedef struct xenstat_vcpu xenstat_vcpu;
+typedef struct xenstat_network xenstat_network;
+typedef struct xenstat_vbd xenstat_vbd;
+
+/* Initialize the xenstat library. Returns a handle to be used with
+ * subsequent calls to the xenstat library, or NULL if an error occurs. */
+xenstat_handle *xenstat_init(void);
+
+/* Release the handle to libxc, free resources, etc. */
+void xenstat_uninit(xenstat_handle * handle);
+
+/* Flags for types of information to collect in xenstat_get_node */
+#define XENSTAT_VCPU 0x1
+#define XENSTAT_NETWORK 0x2
+#define XENSTAT_XEN_VERSION 0x4
+#define XENSTAT_VBD 0x8
+#define XENSTAT_ALL (XENSTAT_VCPU|XENSTAT_NETWORK|XENSTAT_XEN_VERSION|XENSTAT_VBD)
+
+/* Get all available information about a node */
+xenstat_node *xenstat_get_node(xenstat_handle * handle, unsigned int flags);
+
+/* Free the information */
+void xenstat_free_node(xenstat_node * node);
+
+/*
+ * Node functions - extract information from a xenstat_node
+ */
+
+/* Get information about the domain with the given domain ID */
+xenstat_domain *xenstat_node_domain(xenstat_node * node,
+ unsigned int domid);
+
+/* Get the domain with the given index; used to loop over all domains. */
+xenstat_domain *xenstat_node_domain_by_index(xenstat_node * node,
+ unsigned index);
+
+/* Get xen version of the node */
+const char *xenstat_node_xen_version(xenstat_node * node);
+
+/* Get amount of total memory on a node */
+unsigned long long xenstat_node_tot_mem(xenstat_node * node);
+
+/* Get amount of free memory on a node */
+unsigned long long xenstat_node_free_mem(xenstat_node * node);
+
+/* Get amount of freeable memory on a node */
+long xenstat_node_freeable_mb(xenstat_node * node);
+
+/* Find the number of domains existing on a node */
+unsigned int xenstat_node_num_domains(xenstat_node * node);
+
+/* Find the number of CPUs existing on a node */
+unsigned int xenstat_node_num_cpus(xenstat_node * node);
+
+/* Get information about the CPU speed */
+unsigned long long xenstat_node_cpu_hz(xenstat_node * node);
+
+/*
+ * Domain functions - extract information from a xenstat_domain
+ */
+
+/* Get the domain ID for this domain */
+unsigned xenstat_domain_id(xenstat_domain * domain);
+
+/* Set the domain name for the domain */
+char *xenstat_domain_name(xenstat_domain * domain);
+
+/* Get information about how much CPU time has been used */
+unsigned long long xenstat_domain_cpu_ns(xenstat_domain * domain);
+
+/* Find the number of VCPUs allocated to a domain */
+unsigned int xenstat_domain_num_vcpus(xenstat_domain * domain);
+
+/* Get the VCPU handle to obtain VCPU stats */
+xenstat_vcpu *xenstat_domain_vcpu(xenstat_domain * domain,
+ unsigned int vcpu);
+
+/* Find the current memory reservation for this domain */
+unsigned long long xenstat_domain_cur_mem(xenstat_domain * domain);
+
+/* Find the maximum memory reservation for this domain */
+unsigned long long xenstat_domain_max_mem(xenstat_domain * domain);
+
+/* Find the domain's SSID */
+unsigned int xenstat_domain_ssid(xenstat_domain * domain);
+
+/* Get domain states */
+unsigned int xenstat_domain_dying(xenstat_domain * domain);
+unsigned int xenstat_domain_crashed(xenstat_domain * domain);
+unsigned int xenstat_domain_shutdown(xenstat_domain * domain);
+unsigned int xenstat_domain_paused(xenstat_domain * domain);
+unsigned int xenstat_domain_blocked(xenstat_domain * domain);
+unsigned int xenstat_domain_running(xenstat_domain * domain);
+
+/* Get the number of networks for a given domain */
+unsigned int xenstat_domain_num_networks(xenstat_domain *);
+
+/* Get the network handle to obtain network stats */
+xenstat_network *xenstat_domain_network(xenstat_domain * domain,
+ unsigned int network);
+
+/* Get the number of VBDs for a given domain */
+unsigned int xenstat_domain_num_vbds(xenstat_domain *);
+
+/* Get the VBD handle to obtain VBD stats */
+xenstat_vbd *xenstat_domain_vbd(xenstat_domain * domain,
+ unsigned int vbd);
+
+/*
+ * VCPU functions - extract information from a xenstat_vcpu
+ */
+
+/* Get VCPU usage */
+unsigned int xenstat_vcpu_online(xenstat_vcpu * vcpu);
+unsigned long long xenstat_vcpu_ns(xenstat_vcpu * vcpu);
+
+
+/*
+ * Network functions - extract information from a xenstat_network
+ */
+
+/* Get the ID for this network */
+unsigned int xenstat_network_id(xenstat_network * network);
+
+/* Get the number of receive bytes for this network */
+unsigned long long xenstat_network_rbytes(xenstat_network * network);
+
+/* Get the number of receive packets for this network */
+unsigned long long xenstat_network_rpackets(xenstat_network * network);
+
+/* Get the number of receive errors for this network */
+unsigned long long xenstat_network_rerrs(xenstat_network * network);
+
+/* Get the number of receive drops for this network */
+unsigned long long xenstat_network_rdrop(xenstat_network * network);
+
+/* Get the number of transmit bytes for this network */
+unsigned long long xenstat_network_tbytes(xenstat_network * network);
+
+/* Get the number of transmit packets for this network */
+unsigned long long xenstat_network_tpackets(xenstat_network * network);
+
+/* Get the number of transmit errors for this network */
+unsigned long long xenstat_network_terrs(xenstat_network * network);
+
+/* Get the number of transmit drops for this network */
+unsigned long long xenstat_network_tdrop(xenstat_network * network);
+
+/*
+ * VBD functions - extract information from a xen_vbd
+ */
+
+/* Get the back driver type for Virtual Block Device */
+unsigned int xenstat_vbd_type(xenstat_vbd * vbd);
+
+/* Get the device number for Virtual Block Device */
+unsigned int xenstat_vbd_dev(xenstat_vbd * vbd);
+
+/* Get the number of OO/RD/WR requests for vbd */
+unsigned long long xenstat_vbd_oo_reqs(xenstat_vbd * vbd);
+unsigned long long xenstat_vbd_rd_reqs(xenstat_vbd * vbd);
+unsigned long long xenstat_vbd_wr_reqs(xenstat_vbd * vbd);
+unsigned long long xenstat_vbd_rd_sects(xenstat_vbd * vbd);
+unsigned long long xenstat_vbd_wr_sects(xenstat_vbd * vbd);
+
+/* Returns error while getting stats (1 if error happened, 0 otherwise) */
+bool xenstat_vbd_error(xenstat_vbd * vbd);
+
+#endif /* XENSTAT_H */
--- /dev/null
+#warning xs.h is deprecated use xenstore.h instead
+#include <xenstore.h>
--- /dev/null
+#warning xs_lib.h is deprecated use xenstore_lib.h instead
+#include <xenstore_lib.h>
--- /dev/null
+/*
+ Xen Store Daemon providing simple tree-like database.
+ Copyright (C) 2005 Rusty Russell IBM Corporation
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef XENSTORE_H
+#define XENSTORE_H
+
+#include <xenstore_lib.h>
+
+#define XBT_NULL 0
+
+/* Following open flags are deprecated and ignored! */
+#define XS_OPEN_READONLY (1UL<<0)
+#define XS_OPEN_SOCKETONLY (1UL<<1)
+
+/*
+ * Setting XS_UNWATCH_FILTER arranges that after xs_unwatch, no
+ * related watch events will be delivered via xs_read_watch. But
+ * this relies on the (token, subpath) couple being unique.
+ *
+ *  XS_UNWATCH_FILTER clear            XS_UNWATCH_FILTER set
+ *
+ *  Even after xs_unwatch, "stale"     After xs_unwatch returns, no
+ *  instances of the watch event       watch events with the same
+ *  may be delivered.                  token and with the same subpath
+ *                                     will be delivered.
+ *
+ *  A path and a subpath can be        The application must avoid
+ *  registered with the same token.    registering a path (/foo/) and
+ *                                     a subpath (/foo/bar) with the
+ *                                     same token until a successful
+ *                                     xs_unwatch for the first watch
+ *                                     has returned.
+#define XS_UNWATCH_FILTER (1UL<<2)
+
+struct xs_handle;
+typedef uint32_t xs_transaction_t;
+
+/* IMPORTANT: For details on xenstore protocol limits, see
+ * docs/misc/xenstore.txt in the Xen public source repository, and use the
+ * XENSTORE_*_MAX limit macros defined in xen/io/xs_wire.h.
+ */
+
+/* On failure, these routines set errno. */
+
+/* Open a connection to the xs daemon.
+ * Attempts to make a connection over the socket interface,
+ * and if it fails, then over the xenbus interface.
+ *
+ * * Connections made with xs_open(0) (which might be shared page or
+ * socket based) are only guaranteed to work in the parent after
+ * fork.
+ * * xs_daemon_open*() and xs_domain_open() are deprecated synonyms
+ * for xs_open(0).
+ *
+ * Returns a handle or NULL.
+ */
+struct xs_handle *xs_open(unsigned long flags);
+
+/* Close the connection to the xs daemon. */
+void xs_close(struct xs_handle *xsh /* NULL ok */);
+
+/* Connect to the xs daemon.
+ * Returns a handle or NULL.
+ * Deprecated, please use xs_open(0) instead
+ */
+struct xs_handle *xs_daemon_open(void);
+struct xs_handle *xs_domain_open(void);
+struct xs_handle *xs_daemon_open_readonly(void);
+
+/* Close the connection to the xs daemon.
+ * Deprecated, please use xs_close() instead
+ */
+void xs_daemon_close(struct xs_handle *);
+
+/* Throw away the connection to the xs daemon, for use after fork(). */
+void xs_daemon_destroy_postfork(struct xs_handle *);
+
+/* Get contents of a directory.
+ * Returns a malloced array: call free() on it after use.
+ * Num indicates size.
+ * Returns NULL on failure.
+ */
+char **xs_directory(struct xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *num);
+
+/* Get the value of a single file, nul terminated.
+ * Returns a malloced value: call free() on it after use.
+ * len indicates length in bytes, not including terminator.
+ * Returns NULL on failure.
+ */
+void *xs_read(struct xs_handle *h, xs_transaction_t t,
+ const char *path, unsigned int *len);
+
+/* Write the value of a single file.
+ * Returns false on failure.
+ */
+bool xs_write(struct xs_handle *h, xs_transaction_t t,
+ const char *path, const void *data, unsigned int len);
+
+/* Create a new directory.
+ * Returns false on failure, or success if it already exists.
+ */
+bool xs_mkdir(struct xs_handle *h, xs_transaction_t t,
+ const char *path);
+
+/* Destroy a file or directory (and children).
+ * Returns false on failure, or if it doesn't exist.
+ */
+bool xs_rm(struct xs_handle *h, xs_transaction_t t,
+ const char *path);
+
+/* Fake function which will always return false (required to let
+ * libxenstore remain at 3.0 version).
+ */
+bool xs_restrict(struct xs_handle *h, unsigned domid);
+
+/* Get permissions of node (first element is owner, first perms is "other").
+ * Returns malloced array, or NULL: call free() after use.
+ */
+struct xs_permissions *xs_get_permissions(struct xs_handle *h,
+ xs_transaction_t t,
+ const char *path, unsigned int *num);
+
+/* Set permissions of node (must be owner). Returns false on failure.
+ *
+ * Domain 0 may read / write anywhere in the store, regardless of
+ * permission settings.
+ *
+ * Note:
+ * The perms array is a list of (domid, permissions) pairs. The first
+ * element in the list specifies the owner of the list, plus the flags
+ * for every domain not explicitly specified subsequently. The
+ * subsequent entries are normal capabilities.
+ *
+ * Example C code:
+ *
+ * struct xs_permissions perms[2];
+ *
+ * perms[0].id = dm_domid;
+ * perms[0].perms = XS_PERM_NONE;
+ * perms[1].id = guest_domid;
+ * perms[1].perms = XS_PERM_READ;
+ *
+ * It means the owner of the path is domain $dm_domid (hence it always
+ * has read and write permission), all other domains (unless specified
+ * in subsequent pair) can neither read from nor write to that
+ * path. It then specifies domain $guest_domid can read from that
+ * path.
+ */
+bool xs_set_permissions(struct xs_handle *h, xs_transaction_t t,
+ const char *path, struct xs_permissions *perms,
+ unsigned int num_perms);
+
+/* Watch a node for changes (poll on fd to detect, or call read_watch()).
+ * When the node (or any child) changes, fd will become readable.
+ * Token is returned when watch is read, to allow matching.
+ * Returns false on failure.
+ */
+bool xs_watch(struct xs_handle *h, const char *path, const char *token);
+
+/* Return the FD to poll on to see if a watch has fired. */
+int xs_fileno(struct xs_handle *h);
+
+/* Check for node changes. On success, returns a non-NULL pointer ret
+ * such that ret[0] and ret[1] are valid C strings, namely the
+ * triggering path (see docs/misc/xenstore.txt) and the token (from
+ * xs_watch). On error return value is NULL setting errno.
+ *
+ * Callers should, after xs_fileno has become readable, repeatedly
+ * call xs_check_watch until it returns NULL and sets errno to EAGAIN.
+ * (If the fd became readable, xs_check_watch is allowed to make it no
+ * longer show up as readable even if future calls to xs_check_watch
+ * will return more watch events.)
+ *
+ * After the caller is finished with the returned information it
+ * should be freed all in one go with free(ret).
+ */
+char **xs_check_watch(struct xs_handle *h);
+
+/* Find out what node change was on (will block if nothing pending).
+ * Returns array containing the path and token, or NULL.
+ * Use XS_WATCH_* to access these elements.
+ * Call free() after use.
+ */
+char **xs_read_watch(struct xs_handle *h, unsigned int *num);
+
+/* Remove a watch on a node: implicitly acks any outstanding watch.
+ * Returns false on failure (no watch on that node).
+ */
+bool xs_unwatch(struct xs_handle *h, const char *path, const char *token);
+
+/* Start a transaction: changes by others will not be seen during this
+ * transaction, and changes will not be visible to others until end.
+ * Returns XBT_NULL on failure.
+ */
+xs_transaction_t xs_transaction_start(struct xs_handle *h);
+
+/* End a transaction.
+ * If abort is true, the transaction is discarded instead of committed.
+ * Returns false on failure: if errno == EAGAIN, you have to restart
+ * transaction.
+ */
+bool xs_transaction_end(struct xs_handle *h, xs_transaction_t t,
+ bool abort);
+
+/* Introduce a new domain.
+ * This tells the store daemon about a shared memory page, event channel and
+ * store path associated with a domain: the domain uses these to communicate.
+ */
+bool xs_introduce_domain(struct xs_handle *h,
+ unsigned int domid,
+ unsigned long mfn,
+ unsigned int eventchn);
+
+/* Set the target of a domain
+ * This tells the store daemon that a domain is targeting another one, so
+ * it should let it tinker with it.
+ */
+bool xs_set_target(struct xs_handle *h,
+ unsigned int domid,
+ unsigned int target);
+
+/* Resume a domain.
+ * Clear the shutdown flag for this domain in the store.
+ */
+bool xs_resume_domain(struct xs_handle *h, unsigned int domid);
+
+/* Release a domain.
+ * Tells the store domain to release the memory page to the domain.
+ */
+bool xs_release_domain(struct xs_handle *h, unsigned int domid);
+
+/* Query the home path of a domain. Call free() after use.
+ */
+char *xs_get_domain_path(struct xs_handle *h, unsigned int domid);
+
+/* Returns true if child is either equal to parent, or a node underneath
+ * parent; or false otherwise. Done by string comparison, so relative and
+ * absolute pathnames never in a parent/child relationship by this
+ * definition. Cannot fail.
+ */
+bool xs_path_is_subpath(const char *parent, const char *child);
+
+/* Return whether the domain specified has been introduced to xenstored.
+ */
+bool xs_is_domain_introduced(struct xs_handle *h, unsigned int domid);
+
+char *xs_control_command(struct xs_handle *h, const char *cmd,
+ void *data, unsigned int len);
+/* Deprecated: use xs_control_command() instead. */
+char *xs_debug_command(struct xs_handle *h, const char *cmd,
+ void *data, unsigned int len);
+
+int xs_suspend_evtchn_port(int domid);
+#endif /* XENSTORE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
--- /dev/null
+/*
+ Common routines between Xen store user library and daemon.
+ Copyright (C) 2005 Rusty Russell IBM Corporation
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef XENSTORE_LIB_H
+#define XENSTORE_LIB_H
+
+#include <stddef.h>
+#include <stdbool.h>
+#include <limits.h>
+#include <errno.h>
+#include <stdint.h>
+#include <xen/io/xs_wire.h>
+
+/* Bitmask of permissions. */
+enum xs_perm_type {
+ XS_PERM_NONE = 0, /* no access */
+ XS_PERM_READ = 1, /* read access */
+ XS_PERM_WRITE = 2, /* write access */
+ /* Internal use. */
+ XS_PERM_ENOENT_OK = 4,
+ XS_PERM_OWNER = 8,
+};
+
+/* One (domain id, permission bitmask) pair; see xs_set_permissions(). */
+struct xs_permissions
+{
+ unsigned int id; /* domain id */
+ enum xs_perm_type perms; /* bitmask of XS_PERM_* values */
+};
+
+/* Header of the node record in tdb. */
+struct xs_tdb_record_hdr {
+ uint64_t generation; /* generation count of the node */
+ uint32_t num_perms; /* number of entries in perms[] */
+ uint32_t datalen; /* length of the node's data, in bytes */
+ uint32_t childlen; /* length of the node's child names, in bytes */
+ struct xs_permissions perms[0]; /* variable-length permissions array */
+};
+
+/* Each 10 bits takes ~ 3 digits, plus one, plus one for nul terminator. */
+#define MAX_STRLEN(x) ((sizeof(x) * CHAR_BIT + CHAR_BIT-1) / 10 * 3 + 2)
+
+/* Path for various daemon things: env vars can override. */
+const char *xs_daemon_rootdir(void);
+const char *xs_daemon_rundir(void);
+const char *xs_daemon_socket(void);
+const char *xs_daemon_socket_ro(void);
+const char *xs_domain_dev(void);
+const char *xs_daemon_tdb(void);
+
+/* Simple write function: loops for you. */
+bool xs_write_all(int fd, const void *data, unsigned int len);
+
+/* Convert strings to permissions. False if a problem. */
+bool xs_strings_to_perms(struct xs_permissions *perms, unsigned int num,
+ const char *strings);
+
+/* Convert permissions to a string (up to len MAX_STRLEN(unsigned int)+1). */
+bool xs_perm_to_string(const struct xs_permissions *perm,
+ char *buffer, size_t buf_len);
+
+/* Given a string and a length, count how many strings (nul terms). */
+unsigned int xs_count_strings(const char *strings, unsigned int len);
+
+/* Sanitising (quoting) possibly-binary strings. */
+struct expanding_buffer {
+ char *buf;
+ int avail;
+};
+
+/* Ensure that given expanding buffer has at least min_avail characters. */
+char *expanding_buffer_ensure(struct expanding_buffer *, int min_avail);
+
+/* sanitise_value() may return NULL if malloc fails. */
+char *sanitise_value(struct expanding_buffer *, const char *val, unsigned len);
+
+/* *out_len_r on entry is ignored; out must be at least strlen(in)+1 bytes. */
+void unsanitise_value(char *out, unsigned *out_len_r, const char *in);
+
+#endif /* XENSTORE_LIB_H */
--- /dev/null
+/*
+ * xentoolcore.h
+ *
+ * Copyright (c) 2017 Citrix
+ *
+ * Common features used/provided by all Xen tools libraries
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XENTOOLCORE_H
+#define XENTOOLCORE_H
+
+#include <stdint.h>
+#include <xen/xen.h>
+
+/*
+ * int xentoolcore_restrict_all(domid_t domid);
+ *
+ * Arranges that Xen library handles (fds etc.) which are currently held
+ * by Xen libraries, can no longer be used other than to affect domid.
+ *
+ * Does not prevent effects that amount only to
+ * - denial of service, possibly host-wide, by resource exhaustion etc.
+ *
+ * If this cannot be achieved, returns -1 and sets errno.
+ * If called again with the same domid, it may succeed, or it may
+ * fail (even though such a call is potentially meaningful).
+ * (If called again with a different domid, it will necessarily fail.)
+ *
+ * Note for multi-threaded programs: If xentoolcore_restrict_all is
+ * called concurrently with a function which opens and/or closes Xen
+ * library handles (e.g. libxl_ctx_free, xs_close), the restriction is
+ * only guaranteed to be effective after all of the closing functions
+ * have returned, even if that is later than the return from
+ * xentoolcore_restrict_all. (Of course if xentoolcore_restrict_all
+ * is called concurrently with opening functions, the new handles
+ * might or might not be restricted.)
+ *
+ * ====================================================================
+ * IMPORTANT - IMPLEMENTATION STATUS
+ *
+ * This function has been implemented insofar as it appears necessary
+ * for the purposes of running a deprivileged qemu, and is believed to
+ * be sufficient (subject to the caveats discussed in the appropriate
+ * libxl documentation for this feature).
+ *
+ * However, this function is NOT implemented for all Xen libraries.
+ * For each use case of this function, the designer must evaluate and
+ * audit whether the implementation is sufficient in their specific
+ * context.
+ *
+ * Of course, patches to extend the implementation are very welcome.
+ * ====================================================================
+ *
+ * Thread safe.
+ *
+ * We expect that no callers do the following:
+ * - in one thread call xen_somelibrary_open|close
+ * - in another thread call fork
+ * - in the child of the fork, before exec, call
+ * xen_some[other]library_open|close or xentoolcore_restrict_all
+ *
+ */
+int xentoolcore_restrict_all(domid_t domid);
+
+#endif /* XENTOOLCORE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * xentoolcore_internal.h
+ *
+ * Interfaces of xentoolcore directed internally at other Xen libraries
+ *
+ * Copyright (c) 2017 Citrix
+ *
+ * Common code used by all Xen tools libraries
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XENTOOLCORE_INTERNAL_H
+#define XENTOOLCORE_INTERNAL_H
+
+#include <stddef.h>
+
+#include "xentoolcore.h"
+#include "_xentoolcore_list.h"
+
+/*---------- active handle registration ----------*/
+
+/*
+ * This is all to support xentoolcore_restrict_all
+ *
+ * Any libxl library that opens a Xen control handle of any kind which
+ * might allow manipulation of dom0, of other domains, or of the whole
+ * machine, must:
+ * I. arrange that their own datastructure contains a
+ * Xentoolcore__Active_Handle
+ *
+ * II. during the "open handle" function
+ * 1. allocate the memory for the own datastructure and initialise it
+ * 2. set Xentoolcore__Active_Handle.restrict_callback
+ * 3. call xentoolcore__register_active_handle
+ * 3a. if the open fails, call xentoolcore__deregister_active_handle
+ * 4. ONLY THEN actually open the relevant fd or whatever
+ *
+ * III. during the "close handle" function
+ * 1. FIRST call xentoolcore__deregister_active_handle
+ * 2. close the relevant fd or whatever
+ *
+ * [ III(b). Do the same as III for error exit from the open function. ]
+ *
+ * IV. in the restrict_callback function
+ * * Arrange that the fd (or other handle) can no longer be used
+ * other than with respect to domain domid.
+ * * Future attempts to manipulate other domains (or the whole
+ * host) via this handle must cause an error return (and
+ * perhaps a log message), not a crash
+ * * If selective restriction is not possible, the handle must
+ * be completely invalidated so that it is not useable;
+ * subsequent manipulations may not crash
+ * * The restrict_callback function should not normally fail
+ * if this can be easily avoided - it is better to make the
+ * handle nonfunctional instead.
+ * * NB that restrict_callback might be called again. That must
+ * work properly: if the domid is the same, it is idempotent.
+ * If the domid is different, then either the handle must be
+ * completely invalidated, or restrict_callback must fail.
+ *
+ * Thread safety:
+ * xentoolcore__[de]register_active_handle are threadsafe
+ * but MUST NOT be called within restrict_callback
+ *
+ * Fork safety:
+ * Libraries which use these functions do not on that account
+ * need to take any special care over forks occurring in
+ * other threads, provided that they obey the rules above.
+ */
+
+typedef struct Xentoolcore__Active_Handle Xentoolcore__Active_Handle;
+
+typedef int Xentoolcore__Restrict_Callback(Xentoolcore__Active_Handle*,
+ domid_t domid);
+
+struct Xentoolcore__Active_Handle {
+ Xentoolcore__Restrict_Callback *restrict_callback; /* set by the owning library before registration (rule II.2 above) */
+ XENTOOLCORE_LIST_ENTRY(Xentoolcore__Active_Handle) entry; /* linkage used by xentoolcore__[de]register_active_handle */
+};
+
+void xentoolcore__register_active_handle(Xentoolcore__Active_Handle*);
+void xentoolcore__deregister_active_handle(Xentoolcore__Active_Handle*);
+
+/*
+ * Utility function for use in restrict_callback in libraries whose
+ * handles don't have a useful restrict function. We neuter the fd by
+ * dup'ing /dev/null onto it. This is better than closing it, because
+ * it does not involve locking against concurrent uses in other
+ * threads.
+ *
+ * Returns the value that restrict_callback should return.
+ * fd may be < 0.
+ */
+int xentoolcore__restrict_by_dup2_null(int fd);
+
+/* ---------- convenient stuff ---------- */
+
+/*
+ * This does not appear in xentoolcore.h because it is a bit
+ * namespace-unclean.
+ */
+
+/*
+ * Convenience macros.
+ */
+
+/*
+ * CONTAINER_OF work like this. Given:
+ * typedef struct {
+ * ...
+ * member_type member_name;
+ * ...
+ * } outer_type;
+ * outer_type outer, *outer_var;
+ * member_type *inner_ptr = &outer->member_name;
+ *
+ * Then, effectively:
+ * outer_type *CONTAINER_OF(member_type *inner_ptr,
+ * *outer_var, // or type name for outer_type
+ * member_name);
+ *
+ * So that:
+ * CONTAINER_OF(inner_ptr, *outer_var, member_name) == &outer
+ * CONTAINER_OF(inner_ptr, outer_type, member_name) == &outer
+ */
+/* NB: relies on GCC/Clang extensions (statement expressions, typeof). */
+#define CONTAINER_OF(inner_ptr, outer, member_name) \
+ ({ \
+ typeof(outer) *container_of_; \
+ container_of_ = (void*)((char*)(inner_ptr) - \
+ offsetof(typeof(outer), member_name)); \
+ (void)(&container_of_->member_name == \
+ (typeof(inner_ptr))0) /* type check */; \
+ container_of_; \
+ })
+
+#endif /* XENTOOLCORE_INTERNAL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--- /dev/null
+/*
+ * xentoollog.h
+ *
+ * Copyright (c) 2010 Citrix
+ * Part of a generic logging interface used by various dom0 userland libraries.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XENTOOLLOG_H
+#define XENTOOLLOG_H
+
+#include <stdio.h>
+#include <stdarg.h>
+
+
+/*---------- common declarations and types ----------*/
+
+/* Log levels, in increasing order of severity. */
+typedef enum xentoollog_level {
+ XTL_NONE, /* sentinel etc, never used for logging */
+ XTL_DEBUG,
+ XTL_VERBOSE,
+ XTL_DETAIL,
+ XTL_PROGRESS, /* also used for "progress" messages */
+ XTL_INFO,
+ XTL_NOTICE,
+ XTL_WARN,
+ XTL_ERROR,
+ XTL_CRITICAL,
+ XTL_NUM_LEVELS
+} xentoollog_level;
+
+typedef struct xentoollog_logger xentoollog_logger;
+/*
+ * Vtable of callbacks supplied by a log consumer; see
+ * xtl_createlogger_stdiostream() below for a ready-made implementation.
+ */
+struct xentoollog_logger {
+ void (*vmessage)(struct xentoollog_logger *logger,
+ xentoollog_level level,
+ int errnoval /* or -1 */,
+ const char *context /* eg "xc", "xl", may be 0 */,
+ const char *format /* without level, context, \n */,
+ va_list al)
+ __attribute__((format(printf,5,0)));
+ void (*progress)(struct xentoollog_logger *logger,
+ const char *context /* see above */,
+ const char *doing_what /* no \r,\n */,
+ int percent, unsigned long done, unsigned long total)
+ /* null function pointer is ok.
+ * will always be called with done==0 for each new
+ * context/doing_what */;
+ void (*destroy)(struct xentoollog_logger *logger);
+ /* each logger can put its necessary data here */
+};
+
+
+/*---------- facilities for consuming log messages ----------*/
+
+#define XTL_STDIOSTREAM_SHOW_PID 001u
+#define XTL_STDIOSTREAM_SHOW_DATE 002u
+#define XTL_STDIOSTREAM_HIDE_PROGRESS 004u
+#define XTL_STDIOSTREAM_PROGRESS_USE_CR 010u /* default is to */
+#define XTL_STDIOSTREAM_PROGRESS_NO_CR 020u /* use \r to ttys */
+
+typedef struct xentoollog_logger_stdiostream xentoollog_logger_stdiostream;
+
+xentoollog_logger_stdiostream *xtl_createlogger_stdiostream
+ (FILE *f, xentoollog_level min_level, unsigned flags);
+ /* may return 0 if malloc fails, in which case error was logged */
+ /* destroy on this logger does not close the file */
+
+void xtl_stdiostream_set_minlevel(xentoollog_logger_stdiostream*,
+ xentoollog_level min_level);
+void xtl_stdiostream_adjust_flags(xentoollog_logger_stdiostream*,
+ unsigned set_flags, unsigned clear_flags);
+ /* if set_flags and clear_flags overlap, set_flags takes precedence */
+
+void xtl_logger_destroy(struct xentoollog_logger *logger /* 0 is ok */);
+
+
+/*---------- facilities for generating log messages ----------*/
+
+void xtl_logv(struct xentoollog_logger *logger,
+ xentoollog_level level,
+ int errnoval /* or -1 */,
+ const char *context /* eg "xc", "xenstore", "xl", may be 0 */,
+ const char *format /* does not contain \n */,
+ va_list) __attribute__((format(printf,5,0)));
+
+void xtl_log(struct xentoollog_logger *logger,
+ xentoollog_level level,
+ int errnoval /* or -1 */,
+ const char *context /* eg "xc", "xenstore", "xl" */,
+ const char *format /* does not contain \n */,
+ ...) __attribute__((format(printf,5,6)));
+
+void xtl_progress(struct xentoollog_logger *logger,
+ const char *context /* see above, may be 0 */,
+ const char *doing_what,
+ unsigned long done, unsigned long total);
+
+
+/*---------- facilities for defining log message consumers ----------*/
+
+const char *xtl_level_to_string(xentoollog_level); /* never fails */
+
+
+#define XTL_NEW_LOGGER(LOGGER,buffer) ({ \
+ xentoollog_logger_##LOGGER *new_consumer; \
+ \
+ (buffer).vtable.vmessage = LOGGER##_vmessage; \
+ (buffer).vtable.progress = LOGGER##_progress; \
+ (buffer).vtable.destroy = LOGGER##_destroy; \
+ \
+ new_consumer = malloc(sizeof(*new_consumer)); \
+ if (!new_consumer) { \
+ xtl_log((xentoollog_logger*)&buffer, \
+ XTL_CRITICAL, errno, "xtl", \
+ "failed to allocate memory for new message logger"); \
+ } else { \
+ *new_consumer = buffer; \
+ } \
+ \
+ new_consumer; \
+});
+
+
+#endif /* XENTOOLLOG_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
SRCS-$(CONFIG_MiniOS) += minios.c
include $(XEN_ROOT)/tools/libs/libs.mk
-
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxencall)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
+++ /dev/null
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef XENCALL_H
-#define XENCALL_H
-
-/*
- * This library allows you to make arbitrary hypercalls (subject to
- * sufficient permission for the process and the domain itself). Note
- * that while the library interface is stable the hypercalls are
- * subject to their own rules.
- */
-
-#include <stdint.h>
-#include <stddef.h>
-
-/* Callers who don't care don't need to #include <xentoollog.h> */
-struct xentoollog_logger;
-
-typedef struct xencall_handle xencall_handle;
-
-/*
- */
-#define XENCALL_OPENFLAG_NON_REENTRANT (1U<<0)
-
-/*
- * Return a handle onto the hypercall driver. Logs errors.
- * *
- * Note: After fork(2) a child process must not use any opened
- * xencall handle inherited from their parent, nor access any
- * hypercall argument buffers associated with that handle.
- *
- * The child must open a new handle if they want to interact with
- * xencall.
- *
- * Calling exec(2) in a child will safely (and reliably) reclaim any
- * resources which were allocated via a xencall_handle in the parent.
- *
- * A child which does not call exec(2) may safely call xencall_close()
- * on a xencall_handle inherited from their parent. This will attempt
- * to reclaim any resources associated with that handle. Note that in
- * some implementations this reclamation may not be completely
- * effective, in this case any affected resources remain allocated.
- *
- * Calling xencall_close() is the only safe operation on a
- * xencall_handle which has been inherited.
- */
-xencall_handle *xencall_open(struct xentoollog_logger *logger,
- unsigned open_flags);
-
-/*
- * Close a handle previously allocated with xencall_open().
- *
- * Under normal circumstances (i.e. not in the child after a fork) any
- * allocated hypercall argument buffers should be freed using the
- * appropriate xencall_free_*() prior to closing the handle in order
- * to free up resources associated with those mappings.
- *
- * This is the only function which may be safely called on a
- * xencall_handle in a child after a fork. xencall_free_*() must not
- * be called under such circumstances.
- */
-int xencall_close(xencall_handle *xcall);
-
-/*
- * Return the fd used internally by xencall. selecting on it is not
- * useful. But it could be useful for unusual use cases; perhaps,
- * passing to other programs, calling ioctls on directly, or maybe
- * calling fcntl.
- */
-int xencall_fd(xencall_handle *xcall);
-
-/*
- * Call hypercalls with varying numbers of arguments.
- *
- * On success the return value of the hypercall is the return value of
- * the xencall function. On error these functions set errno and
- * return -1.
- *
- * The errno values will be either:
- * - The Xen hypercall error return (from xen/include/public/errno.h)
- * translated into the corresponding local value for that POSIX error.
- * - An errno value produced by the OS driver or the library
- * implementation. Such values may be defined by POSIX or by the OS.
- *
- * Note that under some circumstances it will not be possible to tell
- * whether an error came from Xen or from the OS/library.
- *
- * These functions never log.
- */
-int xencall0(xencall_handle *xcall, unsigned int op);
-int xencall1(xencall_handle *xcall, unsigned int op,
- uint64_t arg1);
-int xencall2(xencall_handle *xcall, unsigned int op,
- uint64_t arg1, uint64_t arg2);
-int xencall3(xencall_handle *xcall, unsigned int op,
- uint64_t arg1, uint64_t arg2, uint64_t arg3);
-int xencall4(xencall_handle *xcall, unsigned int op,
- uint64_t arg1, uint64_t arg2, uint64_t arg3,
- uint64_t arg4);
-int xencall5(xencall_handle *xcall, unsigned int op,
- uint64_t arg1, uint64_t arg2, uint64_t arg3,
- uint64_t arg4, uint64_t arg5);
-
-/*
- * Allocate and free memory which is suitable for use as a pointer
- * argument to a hypercall.
- */
-void *xencall_alloc_buffer_pages(xencall_handle *xcall, size_t nr_pages);
-void xencall_free_buffer_pages(xencall_handle *xcall, void *p, size_t nr_pages);
-
-void *xencall_alloc_buffer(xencall_handle *xcall, size_t size);
-void xencall_free_buffer(xencall_handle *xcall, void *p);
-
-/*
- * Are allocated hypercall buffers safe to be accessed by the hypervisor all
- * the time?
- * Returns 0 if EFAULT might be possible.
- */
-int xencall_buffers_never_fault(xencall_handle *xcall);
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
$(LIB_OBJS) $(PIC_OBJS): _paths.h
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenctrl)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
-
clean: cleanlocal
.PHONY: cleanlocal
+++ /dev/null
-/******************************************************************************
- * xenctrl.h
- *
- * A library for low-level access to the Xen control interfaces.
- *
- * Copyright (c) 2003-2004, K A Fraser.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef XENCTRL_H
-#define XENCTRL_H
-
-/* Tell the Xen public headers we are a user-space tools build. */
-#ifndef __XEN_TOOLS__
-#define __XEN_TOOLS__ 1
-#endif
-
-#include <unistd.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <xen/xen.h>
-#include <xen/domctl.h>
-#include <xen/physdev.h>
-#include <xen/sysctl.h>
-#include <xen/version.h>
-#include <xen/event_channel.h>
-#include <xen/sched.h>
-#include <xen/memory.h>
-#include <xen/grant_table.h>
-#include <xen/hvm/dm_op.h>
-#include <xen/hvm/params.h>
-#include <xen/xsm/flask_op.h>
-#include <xen/kexec.h>
-#include <xen/platform.h>
-
-#include "xentoollog.h"
-
-#if defined(__i386__) || defined(__x86_64__)
-#include <xen/foreign/x86_32.h>
-#include <xen/foreign/x86_64.h>
-#include <xen/arch-x86/xen-mca.h>
-#endif
-
-#define XC_PAGE_SHIFT 12
-#define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
-#define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
-
-#define INVALID_MFN (~0UL)
-
-/*
- * DEFINITIONS FOR CPU BARRIERS
- */
-
-#define xen_barrier() asm volatile ( "" : : : "memory")
-
-#if defined(__i386__)
-#define xen_mb() asm volatile ( "lock addl $0, -4(%%esp)" ::: "memory" )
-#define xen_rmb() xen_barrier()
-#define xen_wmb() xen_barrier()
-#elif defined(__x86_64__)
-#define xen_mb() asm volatile ( "lock addl $0, -32(%%rsp)" ::: "memory" )
-#define xen_rmb() xen_barrier()
-#define xen_wmb() xen_barrier()
-#elif defined(__arm__)
-#define xen_mb() asm volatile ("dmb" : : : "memory")
-#define xen_rmb() asm volatile ("dmb" : : : "memory")
-#define xen_wmb() asm volatile ("dmb" : : : "memory")
-#elif defined(__aarch64__)
-#define xen_mb() asm volatile ("dmb sy" : : : "memory")
-#define xen_rmb() asm volatile ("dmb sy" : : : "memory")
-#define xen_wmb() asm volatile ("dmb sy" : : : "memory")
-#else
-#error "Define barriers"
-#endif
-
-
-#define XENCTRL_HAS_XC_INTERFACE 1
-/* In Xen 4.0 and earlier, xc_interface_open and xc_evtchn_open would
- * both return ints being the file descriptor. In 4.1 and later, they
- * return an xc_interface* and xc_evtchn*, respectively - ie, a
- * pointer to an opaque struct. This #define is provided in 4.1 and
- * later, allowing out-of-tree callers to more easily distinguish
- * between, and be compatible with, both versions.
- */
-
-
-/*
- * GENERAL
- *
- * Unless otherwise specified, each function here returns zero or a
- * non-null pointer on success; or in case of failure, sets errno and
- * returns -1 or a null pointer.
- *
- * Unless otherwise specified, errors result in a call to the error
- * handler function, which by default prints a message to the
- * FILE* passed as the caller_data, which by default is stderr.
- * (This is described below as "logging errors".)
- *
- * The error handler can safely trash errno, as libxc saves it across
- * the callback.
- */
-
-typedef struct xc_interface_core xc_interface;
-
-enum xc_error_code {
- XC_ERROR_NONE = 0,
- XC_INTERNAL_ERROR = 1,
- XC_INVALID_KERNEL = 2,
- XC_INVALID_PARAM = 3,
- XC_OUT_OF_MEMORY = 4,
- /* new codes need to be added to xc_error_level_to_desc too */
-};
-
-typedef enum xc_error_code xc_error_code;
-
-
-/*
- * INITIALIZATION FUNCTIONS
- */
-
-/**
- * This function opens a handle to the hypervisor interface. This function can
- * be called multiple times within a single process. Multiple processes can
- * have an open hypervisor interface at the same time.
- *
- * Note:
- * After fork a child process must not use any opened xc interface
- * handle inherited from their parent. They must open a new handle if
- * they want to interact with xc.
- *
- * Each call to this function should have a corresponding call to
- * xc_interface_close().
- *
- * This function can fail if the caller does not have superuser permission or
- * if a Xen-enabled kernel is not currently running.
- *
- * @return a handle to the hypervisor interface
- */
-xc_interface *xc_interface_open(xentoollog_logger *logger,
- xentoollog_logger *dombuild_logger,
- unsigned open_flags);
- /* if logger==NULL, will log to stderr
- * if dombuild_logger=NULL, will log to a file
- */
-
-/*
- * Note: if XC_OPENFLAG_NON_REENTRANT is passed then libxc must not be
- * called reentrantly and the calling application is responsible for
- * providing mutual exclusion surrounding all libxc calls itself.
- *
- * In particular xc_{get,clear}_last_error only remain valid for the
- * duration of the critical section containing the call which failed.
- */
-enum xc_open_flags {
- XC_OPENFLAG_DUMMY = 1<<0, /* do not actually open a xenctrl interface */
- XC_OPENFLAG_NON_REENTRANT = 1<<1, /* assume library is only every called from a single thread */
-};
-
-/**
- * This function closes an open hypervisor interface.
- *
- * This function can fail if the handle does not represent an open interface or
- * if there were problems closing the interface. In the latter case
- * the interface is still closed.
- *
- * @parm xch a handle to an open hypervisor interface
- * @return 0 on success, -1 otherwise.
- */
-int xc_interface_close(xc_interface *xch);
-
-/**
- * Return the handles which xch has opened and will use for
- * hypercalls, foreign memory accesses and device model operations.
- * These may be used with the corresponding libraries so long as the
- * xch itself remains open.
- */
-struct xencall_handle *xc_interface_xcall_handle(xc_interface *xch);
-struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch);
-struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch);
-
-/*
- * HYPERCALL SAFE MEMORY BUFFER
- *
- * Ensure that memory which is passed to a hypercall has been
- * specially allocated in order to be safe to access from the
- * hypervisor.
- *
- * Each user data pointer is shadowed by an xc_hypercall_buffer data
- * structure. You should never define an xc_hypercall_buffer type
- * directly, instead use the DECLARE_HYPERCALL_BUFFER* macros below.
- *
- * The strucuture should be considered opaque and all access should be
- * via the macros and helper functions defined below.
- *
- * Once the buffer is declared the user is responsible for explicitly
- * allocating and releasing the memory using
- * xc_hypercall_buffer_alloc(_pages) and
- * xc_hypercall_buffer_free(_pages).
- *
- * Once the buffer has been allocated the user can initialise the data
- * via the normal pointer. The xc_hypercall_buffer structure is
- * transparently referenced by the helper macros (such as
- * xen_set_guest_handle) in order to check at compile time that the
- * correct type of memory is being used.
- */
-struct xc_hypercall_buffer {
- /* Hypercall safe memory buffer. */
- void *hbuf;
-
- /*
- * Reference to xc_hypercall_buffer passed as argument to the
- * current function.
- */
- struct xc_hypercall_buffer *param_shadow;
-
- /*
- * Direction of copy for bounce buffering.
- */
- int dir;
-
- /* Used iff dir != 0. */
- void *ubuf;
- size_t sz;
-};
-typedef struct xc_hypercall_buffer xc_hypercall_buffer_t;
-
-/*
- * Construct the name of the hypercall buffer for a given variable.
- * For internal use only
- */
-#define XC__HYPERCALL_BUFFER_NAME(_name) xc__hypercall_buffer_##_name
-
-/*
- * Returns the hypercall_buffer associated with a variable.
- */
-#define HYPERCALL_BUFFER(_name) \
- ({ xc_hypercall_buffer_t _hcbuf_buf1; \
- typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_buf2 = \
- &XC__HYPERCALL_BUFFER_NAME(_name); \
- (void)(&_hcbuf_buf1 == _hcbuf_buf2); \
- (_hcbuf_buf2)->param_shadow ? \
- (_hcbuf_buf2)->param_shadow : (_hcbuf_buf2); \
- })
-
-#define HYPERCALL_BUFFER_INIT_NO_BOUNCE .dir = 0, .sz = 0, .ubuf = (void *)-1
-
-/*
- * Defines a hypercall buffer and user pointer with _name of _type.
- *
- * The user accesses the data as normal via _name which will be
- * transparently converted to the hypercall buffer as necessary.
- */
-#define DECLARE_HYPERCALL_BUFFER(_type, _name) \
- _type *(_name) = NULL; \
- xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
- .hbuf = NULL, \
- .param_shadow = NULL, \
- HYPERCALL_BUFFER_INIT_NO_BOUNCE \
- }
-
-/*
- * Like DECLARE_HYPERCALL_BUFFER() but using an already allocated
- * hypercall buffer, _hbuf.
- *
- * Useful when a hypercall buffer is passed to a function and access
- * via the user pointer is required.
- *
- * See DECLARE_HYPERCALL_BUFFER_ARGUMENT() if the user pointer is not
- * required.
- */
-#define DECLARE_HYPERCALL_BUFFER_SHADOW(_type, _name, _hbuf) \
- _type *(_name) = (_hbuf)->hbuf; \
- __attribute__((unused)) \
- xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
- .hbuf = (void *)-1, \
- .param_shadow = (_hbuf), \
- HYPERCALL_BUFFER_INIT_NO_BOUNCE \
- }
-
-/*
- * Declare the necessary data structure to allow a hypercall buffer
- * passed as an argument to a function to be used in the normal way.
- */
-#define DECLARE_HYPERCALL_BUFFER_ARGUMENT(_name) \
- xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
- .hbuf = (void *)-1, \
- .param_shadow = (_name), \
- HYPERCALL_BUFFER_INIT_NO_BOUNCE \
- }
-
-/*
- * Get the hypercall buffer data pointer in a form suitable for use
- * directly as a hypercall argument.
- */
-#define HYPERCALL_BUFFER_AS_ARG(_name) \
- ({ xc_hypercall_buffer_t _hcbuf_arg1; \
- typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_arg2 = \
- HYPERCALL_BUFFER(_name); \
- (void)(&_hcbuf_arg1 == _hcbuf_arg2); \
- (unsigned long)(_hcbuf_arg2)->hbuf; \
- })
-
-/*
- * Set a xen_guest_handle in a type safe manner, ensuring that the
- * data pointer has been correctly allocated.
- */
-#define set_xen_guest_handle_impl(_hnd, _val, _byte_off) \
- do { \
- xc_hypercall_buffer_t _hcbuf_hnd1; \
- typeof(XC__HYPERCALL_BUFFER_NAME(_val)) *_hcbuf_hnd2 = \
- HYPERCALL_BUFFER(_val); \
- (void) (&_hcbuf_hnd1 == _hcbuf_hnd2); \
- set_xen_guest_handle_raw(_hnd, \
- (_hcbuf_hnd2)->hbuf + (_byte_off)); \
- } while (0)
-
-#undef set_xen_guest_handle
-#define set_xen_guest_handle(_hnd, _val) \
- set_xen_guest_handle_impl(_hnd, _val, 0)
-
-#define set_xen_guest_handle_offset(_hnd, _val, _off) \
- set_xen_guest_handle_impl(_hnd, _val, \
- ((sizeof(*_val)*(_off))))
-
-/* Use with set_xen_guest_handle in place of NULL */
-extern xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL);
-
-/*
- * Allocate and free hypercall buffers with byte granularity.
- */
-void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size);
-#define xc_hypercall_buffer_alloc(_xch, _name, _size) xc__hypercall_buffer_alloc(_xch, HYPERCALL_BUFFER(_name), _size)
-void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b);
-#define xc_hypercall_buffer_free(_xch, _name) xc__hypercall_buffer_free(_xch, HYPERCALL_BUFFER(_name))
-
-/*
- * Allocate and free hypercall buffers with page alignment.
- */
-void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
-#define xc_hypercall_buffer_alloc_pages(_xch, _name, _nr) xc__hypercall_buffer_alloc_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
-void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
-#define xc_hypercall_buffer_free_pages(_xch, _name, _nr) \
- do { \
- if ( _name ) \
- xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name), \
- _nr); \
- } while (0)
-
-/*
- * Array of hypercall buffers.
- *
- * Create an array with xc_hypercall_buffer_array_create() and
- * populate it by declaring one hypercall buffer in a loop and
- * allocating the buffer with xc_hypercall_buffer_array_alloc().
- *
- * To access a previously allocated buffers, declare a new hypercall
- * buffer and call xc_hypercall_buffer_array_get().
- *
- * Destroy the array with xc_hypercall_buffer_array_destroy() to free
- * the array and all its allocated hypercall buffers.
- */
-struct xc_hypercall_buffer_array;
-typedef struct xc_hypercall_buffer_array xc_hypercall_buffer_array_t;
-
-xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch, unsigned n);
-void *xc__hypercall_buffer_array_alloc(xc_interface *xch, xc_hypercall_buffer_array_t *array,
- unsigned index, xc_hypercall_buffer_t *hbuf, size_t size);
-#define xc_hypercall_buffer_array_alloc(_xch, _array, _index, _name, _size) \
- xc__hypercall_buffer_array_alloc(_xch, _array, _index, HYPERCALL_BUFFER(_name), _size)
-void *xc__hypercall_buffer_array_get(xc_interface *xch, xc_hypercall_buffer_array_t *array,
- unsigned index, xc_hypercall_buffer_t *hbuf);
-#define xc_hypercall_buffer_array_get(_xch, _array, _index, _name, _size) \
- xc__hypercall_buffer_array_get(_xch, _array, _index, HYPERCALL_BUFFER(_name))
-void xc_hypercall_buffer_array_destroy(xc_interface *xc, xc_hypercall_buffer_array_t *array);
-
-/*
- * CPUMAP handling
- */
-typedef uint8_t *xc_cpumap_t;
-
-/* return maximum number of cpus the hypervisor supports */
-int xc_get_max_cpus(xc_interface *xch);
-
-/* return the number of online cpus */
-int xc_get_online_cpus(xc_interface *xch);
-
-/* return array size for cpumap */
-int xc_get_cpumap_size(xc_interface *xch);
-
-/* allocate a cpumap */
-xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
-
-/* clear an CPU from the cpumap. */
-void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map);
-
-/* set an CPU in the cpumap. */
-void xc_cpumap_setcpu(int cpu, xc_cpumap_t map);
-
-/* Test whether the CPU in cpumap is set. */
-int xc_cpumap_testcpu(int cpu, xc_cpumap_t map);
-
-/*
- * NODEMAP handling
- */
-typedef uint8_t *xc_nodemap_t;
-
-/* return maximum number of NUMA nodes the hypervisor supports */
-int xc_get_max_nodes(xc_interface *xch);
-
-/* return array size for nodemap */
-int xc_get_nodemap_size(xc_interface *xch);
-
-/* allocate a nodemap */
-xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);
-
-/*
- * DOMAIN DEBUGGING FUNCTIONS
- */
-
-typedef struct xc_core_header {
- unsigned int xch_magic;
- unsigned int xch_nr_vcpus;
- unsigned int xch_nr_pages;
- unsigned int xch_ctxt_offset;
- unsigned int xch_index_offset;
- unsigned int xch_pages_offset;
-} xc_core_header_t;
-
-#define XC_CORE_MAGIC 0xF00FEBED
-#define XC_CORE_MAGIC_HVM 0xF00FEBEE
-
-/*
- * DOMAIN MANAGEMENT FUNCTIONS
- */
-
-typedef struct xc_dominfo {
- uint32_t domid;
- uint32_t ssidref;
- unsigned int dying:1, crashed:1, shutdown:1,
- paused:1, blocked:1, running:1,
- hvm:1, debugged:1, xenstore:1, hap:1;
- unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
- unsigned long nr_pages; /* current number, not maximum */
- unsigned long nr_outstanding_pages;
- unsigned long nr_shared_pages;
- unsigned long nr_paged_pages;
- unsigned long shared_info_frame;
- uint64_t cpu_time;
- unsigned long max_memkb;
- unsigned int nr_online_vcpus;
- unsigned int max_vcpu_id;
- xen_domain_handle_t handle;
- unsigned int cpupool;
- struct xen_arch_domainconfig arch_config;
-} xc_dominfo_t;
-
-typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
-
-typedef union
-{
-#if defined(__i386__) || defined(__x86_64__)
- vcpu_guest_context_x86_64_t x64;
- vcpu_guest_context_x86_32_t x32;
-#endif
- vcpu_guest_context_t c;
-} vcpu_guest_context_any_t;
-
-typedef union
-{
-#if defined(__i386__) || defined(__x86_64__)
- shared_info_x86_64_t x64;
- shared_info_x86_32_t x32;
-#endif
- shared_info_t s;
-} shared_info_any_t;
-
-#if defined(__i386__) || defined(__x86_64__)
-typedef union
-{
- start_info_x86_64_t x64;
- start_info_x86_32_t x32;
- start_info_t s;
-} start_info_any_t;
-#endif
-
-typedef struct xc_vcpu_extstate {
- uint64_t xfeature_mask;
- uint64_t size;
- void *buffer;
-} xc_vcpu_extstate_t;
-
-int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
- struct xen_domctl_createdomain *config);
-
-
-/* Functions to produce a dump of a given domain
- * xc_domain_dumpcore - produces a dump to a specified file
- * xc_domain_dumpcore_via_callback - produces a dump, using a specified
- * callback function
- */
-int xc_domain_dumpcore(xc_interface *xch,
- uint32_t domid,
- const char *corename);
-
-/* Define the callback function type for xc_domain_dumpcore_via_callback.
- *
- * This function is called by the coredump code for every "write",
- * and passes an opaque object for the use of the function and
- * created by the caller of xc_domain_dumpcore_via_callback.
- */
-typedef int (dumpcore_rtn_t)(xc_interface *xch,
- void *arg, char *buffer, unsigned int length);
-
-int xc_domain_dumpcore_via_callback(xc_interface *xch,
- uint32_t domid,
- void *arg,
- dumpcore_rtn_t dump_rtn);
-
-/*
- * This function sets the maximum number of vcpus that a domain may create.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domid the domain id in which vcpus are to be created.
- * @parm max the maximum number of vcpus that the domain may create.
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_max_vcpus(xc_interface *xch,
- uint32_t domid,
- unsigned int max);
-
-/**
- * This function pauses a domain. A paused domain still exists in memory
- * however it does not receive any timeslices from the hypervisor.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to pause
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_pause(xc_interface *xch,
- uint32_t domid);
-/**
- * This function unpauses a domain. The domain should have been previously
- * paused.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to unpause
- * return 0 on success, -1 on failure
- */
-int xc_domain_unpause(xc_interface *xch,
- uint32_t domid);
-
-/**
- * This function will destroy a domain. Destroying a domain removes the domain
- * completely from memory. This function should be called after sending the
- * domain a SHUTDOWN control message to free up the domain resources.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to destroy
- * @return 0 on success, -1 on failure
- */
-int xc_domain_destroy(xc_interface *xch,
- uint32_t domid);
-
-
-/**
- * This function resumes a suspended domain. The domain should have
- * been previously suspended.
- *
- * Note that there are 'xc_domain_suspend' as suspending a domain
- * is quite the endeavour.
- *
- * For the purpose of this explanation there are three guests:
- * PV (using hypercalls for privilgied operations), HVM
- * (fully hardware virtualized guests using emulated devices for everything),
- * and PVHVM (PV aware with hardware virtualisation).
- *
- * HVM guest are the simplest - they suspend via S3 / S4 and resume from
- * S3 / S4. Upon resume they have to re-negotiate with the emulated devices.
- *
- * PV and PVHVM communicate via hypercalls for suspend (and resume).
- * For suspend the toolstack initiates the process by writing an value
- * in XenBus "control/shutdown" with the string "suspend".
- *
- * The PV guest stashes anything it deems neccessary in 'struct
- * start_info' in case of failure (PVHVM may ignore this) and calls
- * the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall (for PV as
- * argument it passes the MFN to 'struct start_info').
- *
- * And then the guest is suspended.
- *
- * The checkpointing or notifying a guest that the suspend failed or
- * cancelled (in case of checkpoint) is by having the
- * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return a non-zero
- * value.
- *
- * The PV and PVHVM resume path are similar. For PV it would be
- * similar to bootup - figure out where the 'struct start_info' is (or
- * if the suspend was cancelled aka checkpointed - reuse the saved
- * values).
- *
- * From here on they differ depending whether the guest is PV or PVHVM
- * in specifics but follow overall the same path:
- * - PV: Bringing up the vCPUS,
- * - PVHVM: Setup vector callback,
- * - Bring up vCPU runstates,
- * - Remap the grant tables if checkpointing or setup from scratch,
- *
- *
- * If the resume was not checkpointing (or if suspend was succesful) we would
- * setup the PV timers and the different PV events. Lastly the PV drivers
- * re-negotiate with the backend.
- *
- * This function would return before the guest started resuming. That is
- * the guest would be in non-running state and its vCPU context would be
- * in the the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return path
- * (for PV and PVHVM). For HVM it would be in would be in QEMU emulated
- * BIOS handling S3 suspend.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to resume
- * @parm fast use cooperative resume (guest must support this)
- * return 0 on success, -1 on failure
- */
-int xc_domain_resume(xc_interface *xch,
- uint32_t domid,
- int fast);
-
-/**
- * This function will shutdown a domain. This is intended for use in
- * fully-virtualized domains where this operation is analogous to the
- * sched_op operations in a paravirtualized domain. The caller is
- * expected to give the reason for the shutdown.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to destroy
- * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
- * @return 0 on success, -1 on failure
- */
-int xc_domain_shutdown(xc_interface *xch,
- uint32_t domid,
- int reason);
-
-int xc_watchdog(xc_interface *xch,
- uint32_t id,
- uint32_t timeout);
-
-/**
- * This function explicitly sets the host NUMA nodes the domain will
- * have affinity with.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domid the domain id one wants to set the affinity of.
- * @parm nodemap the map of the affine nodes.
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_node_setaffinity(xc_interface *xch,
- uint32_t domind,
- xc_nodemap_t nodemap);
-
-/**
- * This function retrieves the host NUMA nodes the domain has
- * affinity with.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domid the domain id one wants to get the node affinity of.
- * @parm nodemap the map of the affine nodes.
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_node_getaffinity(xc_interface *xch,
- uint32_t domind,
- xc_nodemap_t nodemap);
-
-/**
- * This function specifies the CPU affinity for a vcpu.
- *
- * There are two kinds of affinity. Soft affinity is on what CPUs a vcpu
- * prefers to run. Hard affinity is on what CPUs a vcpu is allowed to run.
- * If flags contains XEN_VCPUAFFINITY_SOFT, the soft affinity it is set to
- * what cpumap_soft_inout contains. If flags contains XEN_VCPUAFFINITY_HARD,
- * the hard affinity is set to what cpumap_hard_inout contains. Both flags
- * can be set at the same time, in which case both soft and hard affinity are
- * set to what the respective parameter contains.
- *
- * The function also returns the effective hard or/and soft affinity, still
- * via the cpumap_soft_inout and cpumap_hard_inout parameters. Effective
- * affinity is, in case of soft affinity, the intersection of soft affinity,
- * hard affinity and the cpupool's online CPUs for the domain, and is returned
- * in cpumap_soft_inout, if XEN_VCPUAFFINITY_SOFT is set in flags. In case of
- * hard affinity, it is the intersection between hard affinity and the
- * cpupool's online CPUs, and is returned in cpumap_hard_inout, if
- * XEN_VCPUAFFINITY_HARD is set in flags. If both flags are set, both soft
- * and hard affinity are returned in the respective parameter.
- *
- * We do report it back as effective affinity is what the Xen scheduler will
- * actually use, and we thus allow checking whether or not that matches with,
- * or at least is good enough for, the caller's purposes.
- *
- * @param xch a handle to an open hypervisor interface.
- * @param domid the id of the domain to which the vcpu belongs
- * @param vcpu the vcpu id wihin the domain
- * @param cpumap_hard_inout specifies(/returns) the (effective) hard affinity
- * @param cpumap_soft_inout specifies(/returns) the (effective) soft affinity
- * @param flags what we want to set
- */
-int xc_vcpu_setaffinity(xc_interface *xch,
- uint32_t domid,
- int vcpu,
- xc_cpumap_t cpumap_hard_inout,
- xc_cpumap_t cpumap_soft_inout,
- uint32_t flags);
-
-/**
- * This function retrieves hard and soft CPU affinity of a vcpu,
- * depending on what flags are set.
- *
- * Soft affinity is returned in cpumap_soft if XEN_VCPUAFFINITY_SOFT is set.
- * Hard affinity is returned in cpumap_hard if XEN_VCPUAFFINITY_HARD is set.
- *
- * @param xch a handle to an open hypervisor interface.
- * @param domid the id of the domain to which the vcpu belongs
- * @param vcpu the vcpu id wihin the domain
- * @param cpumap_hard is where hard affinity is returned
- * @param cpumap_soft is where soft affinity is returned
- * @param flags what we want get
- */
-int xc_vcpu_getaffinity(xc_interface *xch,
- uint32_t domid,
- int vcpu,
- xc_cpumap_t cpumap_hard,
- xc_cpumap_t cpumap_soft,
- uint32_t flags);
-
-
-/**
- * This function will return the guest_width (in bytes) for the
- * specified domain.
- *
- * @param xch a handle to an open hypervisor interface.
- * @param domid the domain id one wants the address size width of.
- * @param addr_size the address size.
- */
-int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
- unsigned int *guest_width);
-
-
-/**
- * This function will return information about one or more domains. It is
- * designed to iterate over the list of domains. If a single domain is
- * requested, this function will return the next domain in the list - if
- * one exists. It is, therefore, important in this case to make sure the
- * domain requested was the one returned.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm first_domid the first domain to enumerate information from. Domains
- * are currently enumerate in order of creation.
- * @parm max_doms the number of elements in info
- * @parm info an array of max_doms size that will contain the information for
- * the enumerated domains.
- * @return the number of domains enumerated or -1 on error
- */
-int xc_domain_getinfo(xc_interface *xch,
- uint32_t first_domid,
- unsigned int max_doms,
- xc_dominfo_t *info);
-
-
-/**
- * This function will set the execution context for the specified vcpu.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to set the vcpu context for
- * @parm vcpu the vcpu number for the context
- * @parm ctxt pointer to the the cpu context with the values to set
- * @return the number of domains enumerated or -1 on error
- */
-int xc_vcpu_setcontext(xc_interface *xch,
- uint32_t domid,
- uint32_t vcpu,
- vcpu_guest_context_any_t *ctxt);
-/**
- * This function will return information about one or more domains, using a
- * single hypercall. The domain information will be stored into the supplied
- * array of xc_domaininfo_t structures.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm first_domain the first domain to enumerate information from.
- * Domains are currently enumerate in order of creation.
- * @parm max_domains the number of elements in info
- * @parm info an array of max_doms size that will contain the information for
- * the enumerated domains.
- * @return the number of domains enumerated or -1 on error
- */
-int xc_domain_getinfolist(xc_interface *xch,
- uint32_t first_domain,
- unsigned int max_domains,
- xc_domaininfo_t *info);
-
-/**
- * This function set p2m for broken page
- * &parm xch a handle to an open hypervisor interface
- * @parm domid the domain id which broken page belong to
- * @parm pfn the pfn number of the broken page
- * @return 0 on success, -1 on failure
- */
-int xc_set_broken_page_p2m(xc_interface *xch,
- uint32_t domid,
- unsigned long pfn);
-
-/**
- * This function returns information about the context of a hvm domain
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get information from
- * @parm ctxt_buf a pointer to a structure to store the execution context of
- * the hvm domain
- * @parm size the size of ctxt_buf in bytes
- * @return 0 on success, -1 on failure
- */
-int xc_domain_hvm_getcontext(xc_interface *xch,
- uint32_t domid,
- uint8_t *ctxt_buf,
- uint32_t size);
-
-
-/**
- * This function returns one element of the context of a hvm domain
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get information from
- * @parm typecode which type of elemnt required
- * @parm instance which instance of the type
- * @parm ctxt_buf a pointer to a structure to store the execution context of
- * the hvm domain
- * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
- * @return 0 on success, -1 on failure
- */
-int xc_domain_hvm_getcontext_partial(xc_interface *xch,
- uint32_t domid,
- uint16_t typecode,
- uint16_t instance,
- void *ctxt_buf,
- uint32_t size);
-
-/**
- * This function will set the context for hvm domain
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to set the hvm domain context for
- * @parm hvm_ctxt pointer to the the hvm context with the values to set
- * @parm size the size of hvm_ctxt in bytes
- * @return 0 on success, -1 on failure
- */
-int xc_domain_hvm_setcontext(xc_interface *xch,
- uint32_t domid,
- uint8_t *hvm_ctxt,
- uint32_t size);
-
-/**
- * This function will return guest IO ABI protocol
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get IO ABI protocol for
- * @return guest protocol on success, NULL on failure
- */
-const char *xc_domain_get_native_protocol(xc_interface *xch,
- uint32_t domid);
-
-/**
- * This function returns information about the execution context of a
- * particular vcpu of a domain.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get information from
- * @parm vcpu the vcpu number
- * @parm ctxt a pointer to a structure to store the execution context of the
- * domain
- * @return 0 on success, -1 on failure
- */
-int xc_vcpu_getcontext(xc_interface *xch,
- uint32_t domid,
- uint32_t vcpu,
- vcpu_guest_context_any_t *ctxt);
-
-/**
- * This function initializes the vuart emulation and returns
- * the event to be used by the backend for communicating with
- * the emulation code.
- *
- * @parm xch a handle to an open hypervisor interface
- * #parm type type of vuart
- * @parm domid the domain to get information from
- * @parm console_domid the domid of the backend console
- * @parm gfn the guest pfn to be used as the ring buffer
- * @parm evtchn the event channel to be used for events
- * @return 0 on success, negative error on failure
- */
-int xc_dom_vuart_init(xc_interface *xch,
- uint32_t type,
- uint32_t domid,
- uint32_t console_domid,
- xen_pfn_t gfn,
- evtchn_port_t *evtchn);
-
-/**
- * This function returns information about the XSAVE state of a particular
- * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
- * the call is considered a query to retrieve them and the buffer is not
- * filled.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get information from
- * @parm vcpu the vcpu number
- * @parm extstate a pointer to a structure to store the XSAVE state of the
- * domain
- * @return 0 on success, negative error code on failure
- */
-int xc_vcpu_get_extstate(xc_interface *xch,
- uint32_t domid,
- uint32_t vcpu,
- xc_vcpu_extstate_t *extstate);
-
-typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
-int xc_vcpu_getinfo(xc_interface *xch,
- uint32_t domid,
- uint32_t vcpu,
- xc_vcpuinfo_t *info);
-
-long long xc_domain_get_cpu_usage(xc_interface *xch,
- uint32_t domid,
- int vcpu);
-
-int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
- xen_domain_handle_t handle);
-
-typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
-int xc_shadow_control(xc_interface *xch,
- uint32_t domid,
- unsigned int sop,
- xc_hypercall_buffer_t *dirty_bitmap,
- unsigned long pages,
- unsigned long *mb,
- uint32_t mode,
- xc_shadow_op_stats_t *stats);
-
-int xc_sched_credit_domain_set(xc_interface *xch,
- uint32_t domid,
- struct xen_domctl_sched_credit *sdom);
-
-int xc_sched_credit_domain_get(xc_interface *xch,
- uint32_t domid,
- struct xen_domctl_sched_credit *sdom);
-int xc_sched_credit_params_set(xc_interface *xch,
- uint32_t cpupool_id,
- struct xen_sysctl_credit_schedule *schedule);
-int xc_sched_credit_params_get(xc_interface *xch,
- uint32_t cpupool_id,
- struct xen_sysctl_credit_schedule *schedule);
-
-int xc_sched_credit2_params_set(xc_interface *xch,
- uint32_t cpupool_id,
- struct xen_sysctl_credit2_schedule *schedule);
-int xc_sched_credit2_params_get(xc_interface *xch,
- uint32_t cpupool_id,
- struct xen_sysctl_credit2_schedule *schedule);
-int xc_sched_credit2_domain_set(xc_interface *xch,
- uint32_t domid,
- struct xen_domctl_sched_credit2 *sdom);
-int xc_sched_credit2_domain_get(xc_interface *xch,
- uint32_t domid,
- struct xen_domctl_sched_credit2 *sdom);
-
-int xc_sched_rtds_domain_set(xc_interface *xch,
- uint32_t domid,
- struct xen_domctl_sched_rtds *sdom);
-int xc_sched_rtds_domain_get(xc_interface *xch,
- uint32_t domid,
- struct xen_domctl_sched_rtds *sdom);
-int xc_sched_rtds_vcpu_set(xc_interface *xch,
- uint32_t domid,
- struct xen_domctl_schedparam_vcpu *vcpus,
- uint32_t num_vcpus);
-int xc_sched_rtds_vcpu_get(xc_interface *xch,
- uint32_t domid,
- struct xen_domctl_schedparam_vcpu *vcpus,
- uint32_t num_vcpus);
-
-int
-xc_sched_arinc653_schedule_set(
- xc_interface *xch,
- uint32_t cpupool_id,
- struct xen_sysctl_arinc653_schedule *schedule);
-
-int
-xc_sched_arinc653_schedule_get(
- xc_interface *xch,
- uint32_t cpupool_id,
- struct xen_sysctl_arinc653_schedule *schedule);
-
-/**
- * This function sends a trigger to a domain.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to send trigger
- * @parm trigger the trigger type
- * @parm vcpu the vcpu number to send trigger
- * return 0 on success, -1 on failure
- */
-int xc_domain_send_trigger(xc_interface *xch,
- uint32_t domid,
- uint32_t trigger,
- uint32_t vcpu);
-
-/**
- * This function enables or disable debugging of a domain.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to send trigger
- * @parm enable true to enable debugging
- * return 0 on success, -1 on failure
- */
-int xc_domain_setdebugging(xc_interface *xch,
- uint32_t domid,
- unsigned int enable);
-
-/**
- * This function audits the (top level) p2m of a domain
- * and returns the different error counts, if any.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id whose top level p2m we
- * want to audit
- * @parm orphans count of m2p entries for valid
- * domain pages containing an invalid value
- * @parm m2p_bad count of m2p entries mismatching the
- * associated p2m entry for this domain
- * @parm p2m_bad count of p2m entries for this domain
- * mismatching the associated m2p entry
- * return 0 on success, -1 on failure
- * errno values on failure include:
- * -ENOSYS: not implemented
- * -EFAULT: could not copy results back to guest
- */
-int xc_domain_p2m_audit(xc_interface *xch,
- uint32_t domid,
- uint64_t *orphans,
- uint64_t *m2p_bad,
- uint64_t *p2m_bad);
-
-/**
- * This function sets or clears the requirement that an access memory
- * event listener is required on the domain.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to send trigger
- * @parm enable true to require a listener
- * return 0 on success, -1 on failure
- */
-int xc_domain_set_access_required(xc_interface *xch,
- uint32_t domid,
- unsigned int required);
-/**
- * This function sets the handler of global VIRQs sent by the hypervisor
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id which will handle the VIRQ
- * @parm virq the virq number (VIRQ_*)
- * return 0 on success, -1 on failure
- */
-int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
-
-/*
- * CPUPOOL MANAGEMENT FUNCTIONS
- */
-
-typedef struct xc_cpupoolinfo {
- uint32_t cpupool_id;
- uint32_t sched_id;
- uint32_t n_dom;
- xc_cpumap_t cpumap;
-} xc_cpupoolinfo_t;
-
-#define XC_CPUPOOL_POOLID_ANY 0xFFFFFFFF
-
-/**
- * Create a new cpupool.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm ppoolid pointer to the new cpupool id (in/out)
- * @parm sched_id id of scheduler to use for pool
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_create(xc_interface *xch,
- uint32_t *ppoolid,
- uint32_t sched_id);
-
-/**
- * Destroy a cpupool. Pool must be unused and have no cpu assigned.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid id of the cpupool to destroy
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_destroy(xc_interface *xch,
- uint32_t poolid);
-
-/**
- * Get cpupool info. Returns info for up to the specified number of cpupools
- * starting at the given id.
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid lowest id for which info is returned
- * return cpupool info ptr (to be freed via xc_cpupool_infofree)
- */
-xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
- uint32_t poolid);
-
-/**
- * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm info area to free
- */
-void xc_cpupool_infofree(xc_interface *xch,
- xc_cpupoolinfo_t *info);
-
-/**
- * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid id of the cpupool
- * @parm cpu cpu number to add
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_addcpu(xc_interface *xch,
- uint32_t poolid,
- int cpu);
-
-/**
- * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid id of the cpupool
- * @parm cpu cpu number to remove
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_removecpu(xc_interface *xch,
- uint32_t poolid,
- int cpu);
-
-/**
- * Move domain to another cpupool.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid id of the destination cpupool
- * @parm domid id of the domain to move
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_movedomain(xc_interface *xch,
- uint32_t poolid,
- uint32_t domid);
-
-/**
- * Return map of cpus not in any cpupool.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * return cpumap array on success, NULL else
- */
-xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
-
-/*
- * EVENT CHANNEL FUNCTIONS
- *
- * None of these do any logging.
- */
-
-/* A port identifier is guaranteed to fit in 31 bits. */
-typedef int xc_evtchn_port_or_error_t;
-
-/**
- * This function allocates an unbound port. Ports are named endpoints used for
- * interdomain communication. This function is most useful in opening a
- * well-known port within a domain to receive events on.
- *
- * NOTE: If you are allocating a *local* unbound port, you probably want to
- * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
- * ports *only* during domain creation.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm dom the ID of the local domain (the 'allocatee')
- * @parm remote_dom the ID of the domain who will later bind
- * @return allocated port (in @dom) on success, -1 on failure
- */
-xc_evtchn_port_or_error_t
-xc_evtchn_alloc_unbound(xc_interface *xch,
- uint32_t dom,
- uint32_t remote_dom);
-
-int xc_evtchn_reset(xc_interface *xch,
- uint32_t dom);
-
-typedef struct evtchn_status xc_evtchn_status_t;
-int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status);
-
-
-
-int xc_physdev_pci_access_modify(xc_interface *xch,
- uint32_t domid,
- int bus,
- int dev,
- int func,
- int enable);
-
-int xc_readconsolering(xc_interface *xch,
- char *buffer,
- unsigned int *pnr_chars,
- int clear, int incremental, uint32_t *pindex);
-
-int xc_send_debug_keys(xc_interface *xch, const char *keys);
-
-typedef struct xen_sysctl_physinfo xc_physinfo_t;
-typedef struct xen_sysctl_cputopo xc_cputopo_t;
-typedef struct xen_sysctl_numainfo xc_numainfo_t;
-typedef struct xen_sysctl_meminfo xc_meminfo_t;
-typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
-
-typedef uint32_t xc_cpu_to_node_t;
-typedef uint32_t xc_cpu_to_socket_t;
-typedef uint32_t xc_cpu_to_core_t;
-typedef uint64_t xc_node_to_memsize_t;
-typedef uint64_t xc_node_to_memfree_t;
-typedef uint32_t xc_node_to_node_dist_t;
-
-int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
-int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
- xc_cputopo_t *cputopo);
-int xc_microcode_update(xc_interface *xch, const void *buf, size_t len);
-int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
- xc_meminfo_t *meminfo, uint32_t *distance);
-int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
- physdev_pci_device_t *devs, uint32_t *nodes);
-
-int xc_sched_id(xc_interface *xch,
- int *sched_id);
-
-int xc_machphys_mfn_list(xc_interface *xch,
- unsigned long max_extents,
- xen_pfn_t *extent_start);
-
-typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
-int xc_getcpuinfo(xc_interface *xch, int max_cpus,
- xc_cpuinfo_t *info, int *nr_cpus);
-
-int xc_domain_setmaxmem(xc_interface *xch,
- uint32_t domid,
- uint64_t max_memkb);
-
-int xc_domain_set_memmap_limit(xc_interface *xch,
- uint32_t domid,
- unsigned long map_limitkb);
-
-int xc_domain_setvnuma(xc_interface *xch,
- uint32_t domid,
- uint32_t nr_vnodes,
- uint32_t nr_regions,
- uint32_t nr_vcpus,
- xen_vmemrange_t *vmemrange,
- unsigned int *vdistance,
- unsigned int *vcpu_to_vnode,
- unsigned int *vnode_to_pnode);
-/*
- * Retrieve vnuma configuration
- * domid: IN, target domid
- * nr_vnodes: IN/OUT, number of vnodes, not NULL
- * nr_vmemranges: IN/OUT, number of vmemranges, not NULL
- * nr_vcpus: IN/OUT, number of vcpus, not NULL
- * vmemranges: OUT, an array which has length of nr_vmemranges
- * vdistance: OUT, an array which has length of nr_vnodes * nr_vnodes
- * vcpu_to_vnode: OUT, an array which has length of nr_vcpus
- */
-int xc_domain_getvnuma(xc_interface *xch,
- uint32_t domid,
- uint32_t *nr_vnodes,
- uint32_t *nr_vmemranges,
- uint32_t *nr_vcpus,
- xen_vmemrange_t *vmemrange,
- unsigned int *vdistance,
- unsigned int *vcpu_to_vnode);
-
-int xc_domain_soft_reset(xc_interface *xch,
- uint32_t domid);
-
-#if defined(__i386__) || defined(__x86_64__)
-/*
- * PC BIOS standard E820 types and structure.
- */
-#define E820_RAM 1
-#define E820_RESERVED 2
-#define E820_ACPI 3
-#define E820_NVS 4
-#define E820_UNUSABLE 5
-
-#define E820MAX (128)
-
-struct e820entry {
- uint64_t addr;
- uint64_t size;
- uint32_t type;
-} __attribute__((packed));
-int xc_domain_set_memory_map(xc_interface *xch,
- uint32_t domid,
- struct e820entry entries[],
- uint32_t nr_entries);
-
-int xc_get_machine_memory_map(xc_interface *xch,
- struct e820entry entries[],
- uint32_t max_entries);
-#endif
-
-int xc_reserved_device_memory_map(xc_interface *xch,
- uint32_t flags,
- uint16_t seg,
- uint8_t bus,
- uint8_t devfn,
- struct xen_reserved_device_memory entries[],
- uint32_t *max_entries);
-int xc_domain_set_time_offset(xc_interface *xch,
- uint32_t domid,
- int32_t time_offset_seconds);
-
-int xc_domain_set_tsc_info(xc_interface *xch,
- uint32_t domid,
- uint32_t tsc_mode,
- uint64_t elapsed_nsec,
- uint32_t gtsc_khz,
- uint32_t incarnation);
-
-int xc_domain_get_tsc_info(xc_interface *xch,
- uint32_t domid,
- uint32_t *tsc_mode,
- uint64_t *elapsed_nsec,
- uint32_t *gtsc_khz,
- uint32_t *incarnation);
-
-int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
-
-int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
-
-int xc_domain_increase_reservation(xc_interface *xch,
- uint32_t domid,
- unsigned long nr_extents,
- unsigned int extent_order,
- unsigned int mem_flags,
- xen_pfn_t *extent_start);
-
-int xc_domain_increase_reservation_exact(xc_interface *xch,
- uint32_t domid,
- unsigned long nr_extents,
- unsigned int extent_order,
- unsigned int mem_flags,
- xen_pfn_t *extent_start);
-
-int xc_domain_decrease_reservation(xc_interface *xch,
- uint32_t domid,
- unsigned long nr_extents,
- unsigned int extent_order,
- xen_pfn_t *extent_start);
-
-int xc_domain_decrease_reservation_exact(xc_interface *xch,
- uint32_t domid,
- unsigned long nr_extents,
- unsigned int extent_order,
- xen_pfn_t *extent_start);
-
-int xc_domain_add_to_physmap(xc_interface *xch,
- uint32_t domid,
- unsigned int space,
- unsigned long idx,
- xen_pfn_t gpfn);
-
-int xc_domain_add_to_physmap_batch(xc_interface *xch,
- uint32_t domid,
- uint32_t foreign_domid,
- unsigned int space,
- unsigned int size,
- xen_ulong_t *idxs,
- xen_pfn_t *gfpns,
- int *errs);
-
-int xc_domain_remove_from_physmap(xc_interface *xch,
- uint32_t domid,
- xen_pfn_t gpfn);
-
-int xc_domain_populate_physmap(xc_interface *xch,
- uint32_t domid,
- unsigned long nr_extents,
- unsigned int extent_order,
- unsigned int mem_flags,
- xen_pfn_t *extent_start);
-
-int xc_domain_populate_physmap_exact(xc_interface *xch,
- uint32_t domid,
- unsigned long nr_extents,
- unsigned int extent_order,
- unsigned int mem_flags,
- xen_pfn_t *extent_start);
-
-int xc_domain_claim_pages(xc_interface *xch,
- uint32_t domid,
- unsigned long nr_pages);
-
-int xc_domain_memory_exchange_pages(xc_interface *xch,
- uint32_t domid,
- unsigned long nr_in_extents,
- unsigned int in_order,
- xen_pfn_t *in_extents,
- unsigned long nr_out_extents,
- unsigned int out_order,
- xen_pfn_t *out_extents);
-
-int xc_domain_set_pod_target(xc_interface *xch,
- uint32_t domid,
- uint64_t target_pages,
- uint64_t *tot_pages,
- uint64_t *pod_cache_pages,
- uint64_t *pod_entries);
-
-int xc_domain_get_pod_target(xc_interface *xch,
- uint32_t domid,
- uint64_t *tot_pages,
- uint64_t *pod_cache_pages,
- uint64_t *pod_entries);
-
-int xc_domain_ioport_permission(xc_interface *xch,
- uint32_t domid,
- uint32_t first_port,
- uint32_t nr_ports,
- uint32_t allow_access);
-
-int xc_domain_irq_permission(xc_interface *xch,
- uint32_t domid,
- uint8_t pirq,
- uint8_t allow_access);
-
-int xc_domain_iomem_permission(xc_interface *xch,
- uint32_t domid,
- unsigned long first_mfn,
- unsigned long nr_mfns,
- uint8_t allow_access);
-
-unsigned long xc_make_page_below_4G(xc_interface *xch, uint32_t domid,
- unsigned long mfn);
-
-typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
-typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
-int xc_perfc_reset(xc_interface *xch);
-int xc_perfc_query_number(xc_interface *xch,
- int *nbr_desc,
- int *nbr_val);
-int xc_perfc_query(xc_interface *xch,
- xc_hypercall_buffer_t *desc,
- xc_hypercall_buffer_t *val);
-
-typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
-int xc_lockprof_reset(xc_interface *xch);
-int xc_lockprof_query_number(xc_interface *xch,
- uint32_t *n_elems);
-int xc_lockprof_query(xc_interface *xch,
- uint32_t *n_elems,
- uint64_t *time,
- xc_hypercall_buffer_t *data);
-
-void *xc_memalign(xc_interface *xch, size_t alignment, size_t size);
-
-/**
- * Avoid using this function, as it does not work for all cases (such
- * as 4M superpages, or guests using PSE36). Only used for debugging.
- *
- * Translates a virtual address in the context of a given domain and
- * vcpu returning the GFN containing the address (that is, an MFN for
- * PV guests, a PFN for HVM guests). Returns 0 for failure.
- *
- * @parm xch a handle on an open hypervisor interface
- * @parm dom the domain to perform the translation in
- * @parm vcpu the vcpu to perform the translation on
- * @parm virt the virtual address to translate
- */
-unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
- int vcpu, unsigned long long virt);
-
-
-int xc_copy_to_domain_page(xc_interface *xch, uint32_t domid,
- unsigned long dst_pfn, const char *src_page);
-
-int xc_clear_domain_pages(xc_interface *xch, uint32_t domid,
- unsigned long dst_pfn, int num);
-
-static inline int xc_clear_domain_page(xc_interface *xch, uint32_t domid,
- unsigned long dst_pfn)
-{
- return xc_clear_domain_pages(xch, domid, dst_pfn, 1);
-}
-
-int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
- uint32_t dom);
-
-/* System wide memory properties */
-int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn);
-
-/* Get current total pages allocated to a domain. */
-long xc_get_tot_pages(xc_interface *xch, uint32_t domid);
-
-/**
- * This function retrieves the the number of bytes available
- * in the heap in a specific range of address-widths and nodes.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to query
- * @parm min_width the smallest address width to query (0 if don't care)
- * @parm max_width the largest address width to query (0 if don't care)
- * @parm node the node to query (-1 for all)
- * @parm *bytes caller variable to put total bytes counted
- * @return 0 on success, <0 on failure.
- */
-int xc_availheap(xc_interface *xch, int min_width, int max_width, int node,
- uint64_t *bytes);
-
-/*
- * Trace Buffer Operations
- */
-
-/**
- * xc_tbuf_enable - enable tracing buffers
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm cnt size of tracing buffers to create (in pages)
- * @parm mfn location to store mfn of the trace buffers to
- * @parm size location to store the size (in bytes) of a trace buffer to
- *
- * Gets the machine address of the trace pointer area and the size of the
- * per CPU buffers.
- */
-int xc_tbuf_enable(xc_interface *xch, unsigned long pages,
- unsigned long *mfn, unsigned long *size);
-
-/*
- * Disable tracing buffers.
- */
-int xc_tbuf_disable(xc_interface *xch);
-
-/**
- * This function sets the size of the trace buffers. Setting the size
- * is currently a one-shot operation that may be performed either at boot
- * time or via this interface, not both. The buffer size must be set before
- * enabling tracing.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm size the size in pages per cpu for the trace buffers
- * @return 0 on success, -1 on failure.
- */
-int xc_tbuf_set_size(xc_interface *xch, unsigned long size);
-
-/**
- * This function retrieves the current size of the trace buffers.
- * Note that the size returned is in terms of bytes, not pages.
-
- * @parm xch a handle to an open hypervisor interface
- * @parm size will contain the size in bytes for the trace buffers
- * @return 0 on success, -1 on failure.
- */
-int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);
-
-int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask);
-
-int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);
-
-int xc_domctl(xc_interface *xch, struct xen_domctl *domctl);
-int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl);
-
-int xc_version(xc_interface *xch, int cmd, void *arg);
-
-int xc_flask_op(xc_interface *xch, xen_flask_op_t *op);
-
-/*
- * Subscribe to domain suspend via evtchn.
- * Returns -1 on failure, in which case errno will be set appropriately.
- * Just calls XEN_DOMCTL_subscribe - see the caveats for that domctl
- * (in its doc comment in domctl.h).
- */
-int xc_domain_subscribe_for_suspend(
- xc_interface *xch, uint32_t domid, evtchn_port_t port);
-
-/**************************
- * GRANT TABLE OPERATIONS *
- **************************/
-
-/*
- * These functions sometimes log messages as above, but not always.
- */
-
-
-int xc_gnttab_op(xc_interface *xch, int cmd,
- void * op, int op_size, int count);
-/* Logs iff hypercall bounce fails, otherwise doesn't. */
-
-int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query);
-int xc_gnttab_get_version(xc_interface *xch, uint32_t domid); /* Never logs */
-grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid, int *gnt_num);
-grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid, int *gnt_num);
-/* Sometimes these don't set errno [fixme], and sometimes they don't log. */
-
-int xc_physdev_map_pirq(xc_interface *xch,
- uint32_t domid,
- int index,
- int *pirq);
-
-int xc_physdev_map_pirq_msi(xc_interface *xch,
- uint32_t domid,
- int index,
- int *pirq,
- int devfn,
- int bus,
- int entry_nr,
- uint64_t table_base);
-
-int xc_physdev_unmap_pirq(xc_interface *xch,
- uint32_t domid,
- int pirq);
-
-/*
- * LOGGING AND ERROR REPORTING
- */
-
-
-#define XC_MAX_ERROR_MSG_LEN 1024
-typedef struct xc_error {
- enum xc_error_code code;
- char message[XC_MAX_ERROR_MSG_LEN];
-} xc_error;
-
-
-/*
- * Convert an error code or level into a text description. Return values
- * are pointers to fixed strings and do not need to be freed.
- * Do not fail, but return pointers to generic strings if fed bogus input.
- */
-const char *xc_error_code_to_desc(int code);
-
-/*
- * Convert an errno value to a text description.
- */
-const char *xc_strerror(xc_interface *xch, int errcode);
-
-
-/*
- * Return a pointer to the last error with level XC_REPORT_ERROR. This
- * pointer and the data pointed to are only valid until the next call
- * to libxc in the same thread.
- */
-const xc_error *xc_get_last_error(xc_interface *handle);
-
-/*
- * Clear the last error
- */
-void xc_clear_last_error(xc_interface *xch);
-
-int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value);
-int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value);
-
-/* Deprecated: use xc_hvm_param_set/get() instead. */
-int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value);
-int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value);
-
-/* HVM guest pass-through */
-int xc_assign_device(xc_interface *xch,
- uint32_t domid,
- uint32_t machine_sbdf,
- uint32_t flag);
-
-int xc_get_device_group(xc_interface *xch,
- uint32_t domid,
- uint32_t machine_sbdf,
- uint32_t max_sdevs,
- uint32_t *num_sdevs,
- uint32_t *sdev_array);
-
-int xc_test_assign_device(xc_interface *xch,
- uint32_t domid,
- uint32_t machine_sbdf);
-
-int xc_deassign_device(xc_interface *xch,
- uint32_t domid,
- uint32_t machine_sbdf);
-
-int xc_assign_dt_device(xc_interface *xch,
- uint32_t domid,
- char *path);
-int xc_test_assign_dt_device(xc_interface *xch,
- uint32_t domid,
- char *path);
-int xc_deassign_dt_device(xc_interface *xch,
- uint32_t domid,
- char *path);
-
-int xc_domain_memory_mapping(xc_interface *xch,
- uint32_t domid,
- unsigned long first_gfn,
- unsigned long first_mfn,
- unsigned long nr_mfns,
- uint32_t add_mapping);
-
-int xc_domain_ioport_mapping(xc_interface *xch,
- uint32_t domid,
- uint32_t first_gport,
- uint32_t first_mport,
- uint32_t nr_ports,
- uint32_t add_mapping);
-
-int xc_domain_update_msi_irq(
- xc_interface *xch,
- uint32_t domid,
- uint32_t gvec,
- uint32_t pirq,
- uint32_t gflags,
- uint64_t gtable);
-
-int xc_domain_unbind_msi_irq(xc_interface *xch,
- uint32_t domid,
- uint32_t gvec,
- uint32_t pirq,
- uint32_t gflags);
-
-int xc_domain_bind_pt_irq(xc_interface *xch,
- uint32_t domid,
- uint8_t machine_irq,
- uint8_t irq_type,
- uint8_t bus,
- uint8_t device,
- uint8_t intx,
- uint8_t isa_irq);
-
-int xc_domain_unbind_pt_irq(xc_interface *xch,
- uint32_t domid,
- uint8_t machine_irq,
- uint8_t irq_type,
- uint8_t bus,
- uint8_t device,
- uint8_t intx,
- uint8_t isa_irq);
-
-int xc_domain_bind_pt_pci_irq(xc_interface *xch,
- uint32_t domid,
- uint8_t machine_irq,
- uint8_t bus,
- uint8_t device,
- uint8_t intx);
-
-int xc_domain_bind_pt_isa_irq(xc_interface *xch,
- uint32_t domid,
- uint8_t machine_irq);
-
-int xc_domain_bind_pt_spi_irq(xc_interface *xch,
- uint32_t domid,
- uint16_t vspi,
- uint16_t spi);
-
-int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
- uint32_t domid,
- uint16_t vspi,
- uint16_t spi);
-
-/* Set the target domain */
-int xc_domain_set_target(xc_interface *xch,
- uint32_t domid,
- uint32_t target);
-
-/* Control the domain for debug */
-int xc_domain_debug_control(xc_interface *xch,
- uint32_t domid,
- uint32_t sop,
- uint32_t vcpu);
-
-#if defined(__i386__) || defined(__x86_64__)
-
-/*
- * CPUID policy data, expressed in the legacy XEND format.
- *
- * Policy is an array of strings, 32 chars long:
- * policy[0] = eax
- * policy[1] = ebx
- * policy[2] = ecx
- * policy[3] = edx
- *
- * The format of the string is the following:
- * '1' -> force to 1
- * '0' -> force to 0
- * 'x' -> we don't care (use default)
- * 'k' -> pass through host value
- * 's' -> legacy alias for 'k'
- */
-struct xc_xend_cpuid {
- union {
- struct {
- uint32_t leaf, subleaf;
- };
- uint32_t input[2];
- };
- char *policy[4];
-};
-
-/*
- * Make adjustments to the CPUID settings for a domain.
- *
- * This path is used in two cases. First, for fresh boots of the domain, and
- * secondly for migrate-in/restore of pre-4.14 guests (where CPUID data was
- * missing from the stream). The @restore parameter distinguishes these
- * cases, and the generated policy must be compatible with a 4.13.
- *
- * Either pass a full new @featureset (and @nr_features), or adjust individual
- * features (@pae, @itsc, @nested_virt).
- *
- * Then (optionally) apply legacy XEND overrides (@xend) to the result.
- */
-int xc_cpuid_apply_policy(xc_interface *xch,
- uint32_t domid, bool restore,
- const uint32_t *featureset,
- unsigned int nr_features, bool pae, bool itsc,
- bool nested_virt, const struct xc_xend_cpuid *xend);
-int xc_mca_op(xc_interface *xch, struct xen_mc *mc);
-int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
- xc_cpumap_t cpumap, unsigned int nr_cpus);
-#endif
-
-struct xc_px_val {
- uint64_t freq; /* Px core frequency */
- uint64_t residency; /* Px residency time */
- uint64_t count; /* Px transition count */
-};
-
-struct xc_px_stat {
- uint8_t total; /* total Px states */
- uint8_t usable; /* usable Px states */
- uint8_t last; /* last Px state */
- uint8_t cur; /* current Px state */
- uint64_t *trans_pt; /* Px transition table */
- struct xc_px_val *pt;
-};
-
-int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px);
-int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt);
-int xc_pm_reset_pxstat(xc_interface *xch, int cpuid);
-
-struct xc_cx_stat {
- uint32_t nr; /* entry nr in triggers[]/residencies[], incl C0 */
- uint32_t last; /* last Cx state */
- uint64_t idle_time; /* idle time from boot */
- uint64_t *triggers; /* Cx trigger counts */
- uint64_t *residencies; /* Cx residencies */
- uint32_t nr_pc; /* entry nr in pc[] */
- uint32_t nr_cc; /* entry nr in cc[] */
- uint64_t *pc; /* 1-biased indexing (i.e. excl C0) */
- uint64_t *cc; /* 1-biased indexing (i.e. excl C0) */
-};
-typedef struct xc_cx_stat xc_cx_stat_t;
-
-int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx);
-int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt);
-int xc_pm_reset_cxstat(xc_interface *xch, int cpuid);
-
-int xc_cpu_online(xc_interface *xch, int cpu);
-int xc_cpu_offline(xc_interface *xch, int cpu);
-int xc_smt_enable(xc_interface *xch);
-int xc_smt_disable(xc_interface *xch);
-
-/*
- * cpufreq para name of this structure named
- * same as sysfs file name of native linux
- */
-typedef struct xen_userspace xc_userspace_t;
-typedef struct xen_ondemand xc_ondemand_t;
-
-struct xc_get_cpufreq_para {
- /* IN/OUT variable */
- uint32_t cpu_num;
- uint32_t freq_num;
- uint32_t gov_num;
-
- /* for all governors */
- /* OUT variable */
- uint32_t *affected_cpus;
- uint32_t *scaling_available_frequencies;
- char *scaling_available_governors;
- char scaling_driver[CPUFREQ_NAME_LEN];
-
- uint32_t cpuinfo_cur_freq;
- uint32_t cpuinfo_max_freq;
- uint32_t cpuinfo_min_freq;
- uint32_t scaling_cur_freq;
-
- char scaling_governor[CPUFREQ_NAME_LEN];
- uint32_t scaling_max_freq;
- uint32_t scaling_min_freq;
-
- /* for specific governor */
- union {
- xc_userspace_t userspace;
- xc_ondemand_t ondemand;
- } u;
-
- int32_t turbo_enabled;
-};
-
-int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
- struct xc_get_cpufreq_para *user_para);
-int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname);
-int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
- int ctrl_type, int ctrl_value);
-int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq);
-
-int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value);
-
-int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value);
-int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value);
-
-int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value);
-int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value);
-
-int xc_enable_turbo(xc_interface *xch, int cpuid);
-int xc_disable_turbo(xc_interface *xch, int cpuid);
-
-/**
- * altp2m operations
- */
-
-int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state);
-int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state);
-int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
- uint32_t vcpuid, xen_pfn_t gfn);
-int xc_altp2m_set_vcpu_disable_notify(xc_interface *handle, uint32_t domid,
- uint32_t vcpuid);
-int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
- xenmem_access_t default_access, uint16_t *view_id);
-int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
- uint16_t view_id);
-/* Switch all vCPUs of the domain to the specified altp2m view */
-int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
- uint16_t view_id);
-int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
- uint16_t view_id, xen_pfn_t gfn, bool sve);
-int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
- uint16_t view_id, xen_pfn_t first_gfn,
- xen_pfn_t last_gfn, bool sve,
- xen_pfn_t *error_gfn, int32_t *error_code);
-int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
- uint16_t view_id, xen_pfn_t gfn, bool *sve);
-int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
- uint16_t view_id, xen_pfn_t gfn,
- xenmem_access_t access);
-int xc_altp2m_set_mem_access_multi(xc_interface *handle, uint32_t domid,
- uint16_t view_id, uint8_t *access,
- uint64_t *gfns, uint32_t nr);
-int xc_altp2m_get_mem_access(xc_interface *handle, uint32_t domid,
- uint16_t view_id, xen_pfn_t gfn,
- xenmem_access_t *access);
-int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
- uint16_t view_id, xen_pfn_t old_gfn,
- xen_pfn_t new_gfn);
-int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
- uint32_t vcpuid, uint16_t *p2midx);
-/*
- * Set view visibility for xc_altp2m_switch_to_view and vmfunc.
- * Note: If altp2m mode is set to mixed the guest is able to change the view
- * visibility and then call vmfunc.
- */
-int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
- uint16_t view_id, bool visible);
-
-/**
- * Mem paging operations.
- * Paging is supported only on the x86 architecture in 64 bit mode, with
- * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
- * support is considered experimental.
- */
-int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
-int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
-int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
-int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
- uint64_t gfn);
-int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
-int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
-int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
- uint64_t gfn, void *buffer);
-
-/**
- * Access tracking operations.
- * Supported only on Intel EPT 64 bit processors.
- */
-
-/*
- * Set a range of memory to a specific access.
- * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
- * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
- */
-int xc_set_mem_access(xc_interface *xch, uint32_t domain_id,
- xenmem_access_t access, uint64_t first_pfn,
- uint32_t nr);
-
-/*
- * Set an array of pages to their respective access in the access array.
- * The nr parameter specifies the size of the pages and access arrays.
- * The same allowed access types as for xc_set_mem_access() apply.
- */
-int xc_set_mem_access_multi(xc_interface *xch, uint32_t domain_id,
- uint8_t *access, uint64_t *pages,
- uint32_t nr);
-
-/*
- * Gets the mem access for the given page (returned in access on success)
- */
-int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
- uint64_t pfn, xenmem_access_t *access);
-
-/*
- * Returns the VM_EVENT_INTERFACE version.
- */
-int xc_vm_event_get_version(xc_interface *xch);
-
-/***
- * Monitor control operations.
- *
- * Enables the VM event monitor ring and returns the mapped ring page.
- * This ring is used to deliver mem_access events, as well a set of additional
- * events that can be enabled with the xc_monitor_* functions.
- *
- * Will return NULL on error.
- * Caller has to unmap this page when done.
- */
-void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
-int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
-int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
-/*
- * Get a bitmap of supported monitor events in the form
- * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
- */
-int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
- uint32_t *capabilities);
-int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
- uint16_t index, bool enable, bool sync,
- uint64_t bitmask, bool onchangeonly);
-/*
- * A list of MSR indices can usually be found in /usr/include/asm/msr-index.h.
- * Please consult the Intel/AMD manuals for more information on
- * non-architectural indices.
- */
-int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
- bool enable, bool onchangeonly);
-int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id, bool enable);
-int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
- bool enable);
-int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
- bool enable);
-int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id,
- bool enable, bool sync, bool allow_userspace);
-/*
- * Disables page-walk mem_access events by emulating. If the
- * emulation can not be performed then a VM_EVENT_REASON_EMUL_UNIMPLEMENTED
- * event will be issued.
- */
-int xc_monitor_inguest_pagefault(xc_interface *xch, uint32_t domain_id,
- bool disable);
-int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
- bool enable, bool sync);
-int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable);
-int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
- bool enable);
-int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
- bool enable);
-/**
- * This function enables / disables emulation for each REP for a
- * REP-compatible instruction.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domain_id the domain id one wants to get the node affinity of.
- * @parm enable if 0 optimize when possible, else emulate each REP.
- * @return 0 on success, -1 on failure.
- */
-int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
- bool enable);
-
-/***
- * Memory sharing operations.
- *
- * Unles otherwise noted, these calls return 0 on succes, -1 and errno on
- * failure.
- *
- * Sharing is supported only on the x86 architecture in 64 bit mode, with
- * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
- * support is considered experimental.
-
- * Calls below return ENOSYS if not in the x86_64 architecture.
- * Calls below return ENODEV if the domain does not support HAP.
- * Calls below return ESRCH if the specified domain does not exist.
- * Calls below return EPERM if the caller is unprivileged for this domain.
- */
-
-/* Turn on/off sharing for the domid, depending on the enable flag.
- *
- * Returns EXDEV if trying to enable and the domain has had a PCI device
- * assigned for passthrough (these two features are mutually exclusive).
- *
- * When sharing for a domain is turned off, the domain may still reference
- * shared pages. Unsharing happens lazily. */
-int xc_memshr_control(xc_interface *xch,
- uint32_t domid,
- int enable);
-
-/* Create a communication ring in which the hypervisor will place ENOMEM
- * notifications.
- *
- * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
- * allocated, and thus the out-of-memory error occurr.
- *
- * For complete examples on how to plumb a notification ring, look into
- * xenpaging or xen-access.
- *
- * On receipt of a notification, the helper should ensure there is memory
- * available to the domain before retrying.
- *
- * If a domain encounters an ENOMEM condition when sharing and this ring
- * has not been set up, the hypervisor will crash the domain.
- *
- * Fails with:
- * EINVAL if port is NULL
- * EINVAL if the sharing ring has already been enabled
- * ENOSYS if no guest gfn has been specified to host the ring via an hvm param
- * EINVAL if the gfn for the ring has not been populated
- * ENOENT if the gfn for the ring is paged out, or cannot be unshared
- * EINVAL if the gfn for the ring cannot be written to
- * EINVAL if the domain is dying
- * ENOSPC if an event channel cannot be allocated for the ring
- * ENOMEM if memory cannot be allocated for internal data structures
- * EINVAL or EACCESS if the request is denied by the security policy
- */
-
-int xc_memshr_ring_enable(xc_interface *xch,
- uint32_t domid,
- uint32_t *port);
-/* Disable the ring for ENOMEM communication.
- * May fail with EINVAL if the ring was not enabled in the first place.
- */
-int xc_memshr_ring_disable(xc_interface *xch,
- uint32_t domid);
-
-/*
- * Calls below return EINVAL if sharing has not been enabled for the domain
- * Calls below return EINVAL if the domain is dying
- */
-/* Once a reponse to an ENOMEM notification is prepared, the tool can
- * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
- * event channel kick and/or this call. */
-int xc_memshr_domain_resume(xc_interface *xch,
- uint32_t domid);
-
-/* Select a page for sharing.
- *
- * A 64 bit opaque handle will be stored in handle. The hypervisor ensures
- * that if the page is modified, the handle will be invalidated, and future
- * users of it will fail. If the page has already been selected and is still
- * associated to a valid handle, the existing handle will be returned.
- *
- * May fail with:
- * EINVAL if the gfn is not populated or not sharable (mmio, etc)
- * ENOMEM if internal data structures cannot be allocated
- * E2BIG if the page is being referenced by other subsytems (e.g. qemu)
- * ENOENT or EEXIST if there are internal hypervisor errors.
- */
-int xc_memshr_nominate_gfn(xc_interface *xch,
- uint32_t domid,
- unsigned long gfn,
- uint64_t *handle);
-/* Same as above, but instead of a guest frame number, the input is a grant
- * reference provided by the guest.
- *
- * May fail with EINVAL if the grant reference is invalid.
- */
-int xc_memshr_nominate_gref(xc_interface *xch,
- uint32_t domid,
- grant_ref_t gref,
- uint64_t *handle);
-
-/* The three calls below may fail with
- * 10 (or -XENMEM_SHARING_OP_S_HANDLE_INVALID) if the handle passed as source
- * is invalid.
- * 9 (or -XENMEM_SHARING_OP_C_HANDLE_INVALID) if the handle passed as client is
- * invalid.
- */
-/* Share two nominated guest pages.
- *
- * If the call succeeds, both pages will point to the same backing frame (or
- * mfn). The hypervisor will verify the handles are still valid, but it will
- * not perform any sanity checking on the contens of the pages (the selection
- * mechanism for sharing candidates is entirely up to the user-space tool).
- *
- * After successful sharing, the client handle becomes invalid. Both <domain,
- * gfn> tuples point to the same mfn with the same handle, the one specified as
- * source. Either 3-tuple can be specified later for further re-sharing.
- */
-int xc_memshr_share_gfns(xc_interface *xch,
- uint32_t source_domain,
- unsigned long source_gfn,
- uint64_t source_handle,
- uint32_t client_domain,
- unsigned long client_gfn,
- uint64_t client_handle);
-
-/* Same as above, but share two grant references instead.
- *
- * May fail with EINVAL if either grant reference is invalid.
- */
-int xc_memshr_share_grefs(xc_interface *xch,
- uint32_t source_domain,
- grant_ref_t source_gref,
- uint64_t source_handle,
- uint32_t client_domain,
- grant_ref_t client_gref,
- uint64_t client_handle);
-
-/* Allows to add to the guest physmap of the client domain a shared frame
- * directly.
- *
- * May additionally fail with
- * 9 (-XENMEM_SHARING_OP_C_HANDLE_INVALID) if the physmap entry for the gfn is
- * not suitable.
- * ENOMEM if internal data structures cannot be allocated.
- * ENOENT if there is an internal hypervisor error.
- */
-int xc_memshr_add_to_physmap(xc_interface *xch,
- uint32_t source_domain,
- unsigned long source_gfn,
- uint64_t source_handle,
- uint32_t client_domain,
- unsigned long client_gfn);
-
-/* Allows to deduplicate a range of memory of a client domain. Using
- * this function is equivalent of calling xc_memshr_nominate_gfn for each gfn
- * in the two domains followed by xc_memshr_share_gfns.
- *
- * May fail with -EINVAL if the source and client domain have different
- * memory size or if memory sharing is not enabled on either of the domains.
- * May also fail with -ENOMEM if there isn't enough memory available to store
- * the sharing metadata before deduplication can happen.
- */
-int xc_memshr_range_share(xc_interface *xch,
- uint32_t source_domain,
- uint32_t client_domain,
- uint64_t first_gfn,
- uint64_t last_gfn);
-
-int xc_memshr_fork(xc_interface *xch,
- uint32_t source_domain,
- uint32_t client_domain,
- bool allow_with_iommu,
- bool block_interrupts);
-
-/*
- * Note: this function is only intended to be used on short-lived forks that
- * haven't yet aquired a lot of memory. In case the fork has a lot of memory
- * it is likely more performant to create a new fork with xc_memshr_fork.
- *
- * With VMs that have a lot of memory this call may block for a long time.
- */
-int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain);
-
-/* Debug calls: return the number of pages referencing the shared frame backing
- * the input argument. Should be one or greater.
- *
- * May fail with EINVAL if there is no backing shared frame for the input
- * argument.
- */
-int xc_memshr_debug_gfn(xc_interface *xch,
- uint32_t domid,
- unsigned long gfn);
-/* May additionally fail with EINVAL if the grant reference is invalid. */
-int xc_memshr_debug_gref(xc_interface *xch,
- uint32_t domid,
- grant_ref_t gref);
-
-/* Audits the share subsystem.
- *
- * Returns ENOSYS if not supported (may not be compiled into the hypervisor).
- *
- * Returns the number of errors found during auditing otherwise. May be (should
- * be!) zero.
- *
- * If debugtrace support has been compiled into the hypervisor and is enabled,
- * verbose descriptions for the errors are available in the hypervisor console.
- */
-int xc_memshr_audit(xc_interface *xch);
-
-/* Stats reporting.
- *
- * At any point in time, the following equality should hold for a host:
- *
- * Let dominfo(d) be the xc_dominfo_t struct filled by a call to
- * xc_domain_getinfo(d)
- *
- * The summation of dominfo(d)->shr_pages for all domains in the system
- * should be equal to
- * xc_sharing_freed_pages + xc_sharing_used_frames
- */
-/*
- * This function returns the total number of pages freed by using sharing
- * on the system. For example, if two domains contain a single entry in
- * their p2m table that points to the same shared page (and no other pages
- * in the system are shared), then this function should return 1.
- */
-long xc_sharing_freed_pages(xc_interface *xch);
-
-/*
- * This function returns the total number of frames occupied by shared
- * pages on the system. This is independent of the number of domains
- * pointing at these frames. For example, in the above scenario this
- * should return 1. (And dominfo(d) for each of the two domains should return 1
- * as well).
- *
- * Note that some of these sharing_used_frames may be referenced by
- * a single domain page, and thus not realize any savings. The same
- * applies to some of the pages counted in dominfo(d)->shr_pages.
- */
-long xc_sharing_used_frames(xc_interface *xch);
-/*** End sharing interface ***/
-
-int xc_flask_load(xc_interface *xc_handle, char *buf, uint32_t size);
-int xc_flask_context_to_sid(xc_interface *xc_handle, char *buf, uint32_t size, uint32_t *sid);
-int xc_flask_sid_to_context(xc_interface *xc_handle, int sid, char *buf, uint32_t size);
-int xc_flask_getenforce(xc_interface *xc_handle);
-int xc_flask_setenforce(xc_interface *xc_handle, int mode);
-int xc_flask_getbool_byid(xc_interface *xc_handle, int id, char *name, uint32_t size, int *curr, int *pend);
-int xc_flask_getbool_byname(xc_interface *xc_handle, char *name, int *curr, int *pend);
-int xc_flask_setbool(xc_interface *xc_handle, char *name, int value, int commit);
-int xc_flask_add_pirq(xc_interface *xc_handle, unsigned int pirq, char *scontext);
-int xc_flask_add_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high,
- char *scontext);
-int xc_flask_add_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high,
- char *scontext);
-int xc_flask_add_device(xc_interface *xc_handle, unsigned long device, char *scontext);
-int xc_flask_del_pirq(xc_interface *xc_handle, unsigned int pirq);
-int xc_flask_del_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high);
-int xc_flask_del_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high);
-int xc_flask_del_device(xc_interface *xc_handle, unsigned long device);
-int xc_flask_access(xc_interface *xc_handle, const char *scon, const char *tcon,
- uint16_t tclass, uint32_t req,
- uint32_t *allowed, uint32_t *decided,
- uint32_t *auditallow, uint32_t *auditdeny,
- uint32_t *seqno);
-int xc_flask_avc_cachestats(xc_interface *xc_handle, char *buf, int size);
-int xc_flask_policyvers(xc_interface *xc_handle);
-int xc_flask_avc_hashstats(xc_interface *xc_handle, char *buf, int size);
-int xc_flask_getavc_threshold(xc_interface *xc_handle);
-int xc_flask_setavc_threshold(xc_interface *xc_handle, int threshold);
-int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid);
-
-struct elf_binary;
-void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
- int verbose);
-/* Useful for callers who also use libelf. */
-
-/*
- * Execute an image previously loaded with xc_kexec_load().
- *
- * Does not return on success.
- *
- * Fails with:
- * ENOENT if the specified image has not been loaded.
- */
-int xc_kexec_exec(xc_interface *xch, int type);
-
-/*
- * Find the machine address and size of certain memory areas.
- *
- * KEXEC_RANGE_MA_CRASH crash area
- * KEXEC_RANGE_MA_XEN Xen itself
- * KEXEC_RANGE_MA_CPU CPU note for CPU number 'nr'
- * KEXEC_RANGE_MA_XENHEAP xenheap
- * KEXEC_RANGE_MA_EFI_MEMMAP EFI Memory Map
- * KEXEC_RANGE_MA_VMCOREINFO vmcoreinfo
- *
- * Fails with:
- * EINVAL if the range or CPU number isn't valid.
- */
-int xc_kexec_get_range(xc_interface *xch, int range, int nr,
- uint64_t *size, uint64_t *start);
-
-/*
- * Load a kexec image into memory.
- *
- * The image may be of type KEXEC_TYPE_DEFAULT (executed on request)
- * or KEXEC_TYPE_CRASH (executed on a crash).
- *
- * The image architecture may be a 32-bit variant of the hypervisor
- * architecture (e.g, EM_386 on a x86-64 hypervisor).
- *
- * Fails with:
- * ENOMEM if there is insufficient memory for the new image.
- * EINVAL if the image does not fit into the crash area or the entry
- * point isn't within one of segments.
- * EBUSY if another image is being executed.
- */
-int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
- uint64_t entry_maddr,
- uint32_t nr_segments, xen_kexec_segment_t *segments);
-
-/*
- * Unload a kexec image.
- *
- * This prevents a KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH image from
- * being executed. The crash images are not cleared from the crash
- * region.
- */
-int xc_kexec_unload(xc_interface *xch, int type);
-
-/*
- * Find out whether the image has been succesfully loaded.
- *
- * The type can be either KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
- * If zero is returned, that means no image is loaded for the type.
- * If one is returned, that means an image is loaded for the type.
- * Otherwise, negative return value indicates error.
- */
-int xc_kexec_status(xc_interface *xch, int type);
-
-typedef xenpf_resource_entry_t xc_resource_entry_t;
-
-/*
- * Generic resource operation which contains multiple non-preemptible
- * resource access entries that passed to xc_resource_op().
- */
-struct xc_resource_op {
- uint64_t result; /* on return, check this field first */
- uint32_t cpu; /* which cpu to run */
- uint32_t nr_entries; /* number of resource entries */
- xc_resource_entry_t *entries;
-};
-
-typedef struct xc_resource_op xc_resource_op_t;
-int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops);
-
-#if defined(__i386__) || defined(__x86_64__)
-enum xc_psr_cmt_type {
- XC_PSR_CMT_L3_OCCUPANCY,
- XC_PSR_CMT_TOTAL_MEM_COUNT,
- XC_PSR_CMT_LOCAL_MEM_COUNT,
-};
-typedef enum xc_psr_cmt_type xc_psr_cmt_type;
-
-enum xc_psr_type {
- XC_PSR_CAT_L3_CBM = 1,
- XC_PSR_CAT_L3_CBM_CODE = 2,
- XC_PSR_CAT_L3_CBM_DATA = 3,
- XC_PSR_CAT_L2_CBM = 4,
- XC_PSR_MBA_THRTL = 5,
-};
-typedef enum xc_psr_type xc_psr_type;
-
-enum xc_psr_feat_type {
- XC_PSR_CAT_L3,
- XC_PSR_CAT_L2,
- XC_PSR_MBA,
-};
-typedef enum xc_psr_feat_type xc_psr_feat_type;
-
-union xc_psr_hw_info {
- struct {
- uint32_t cos_max;
- uint32_t cbm_len;
- bool cdp_enabled;
- } cat;
-
- struct {
- uint32_t cos_max;
- uint32_t thrtl_max;
- bool linear;
- } mba;
-};
-typedef union xc_psr_hw_info xc_psr_hw_info;
-
-int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid);
-int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid);
-int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
- uint32_t *rmid);
-int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid);
-int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
- uint32_t *upscaling_factor);
-int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask);
-int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
- uint32_t *l3_cache_size);
-int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
- uint32_t psr_cmt_type, uint64_t *monitor_data,
- uint64_t *tsc);
-int xc_psr_cmt_enabled(xc_interface *xch);
-
-int xc_psr_set_domain_data(xc_interface *xch, uint32_t domid,
- xc_psr_type type, uint32_t target,
- uint64_t data);
-int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
- xc_psr_type type, uint32_t target,
- uint64_t *data);
-int xc_psr_get_hw_info(xc_interface *xch, uint32_t socket,
- xc_psr_feat_type type, xc_psr_hw_info *hw_info);
-
-int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps);
-int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
- uint32_t *nr_features, uint32_t *featureset);
-
-int xc_get_cpu_policy_size(xc_interface *xch, uint32_t *nr_leaves,
- uint32_t *nr_msrs);
-int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
- uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
- uint32_t *nr_msrs, xen_msr_entry_t *msrs);
-int xc_get_domain_cpu_policy(xc_interface *xch, uint32_t domid,
- uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
- uint32_t *nr_msrs, xen_msr_entry_t *msrs);
-int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
- uint32_t nr_leaves, xen_cpuid_leaf_t *leaves,
- uint32_t nr_msrs, xen_msr_entry_t *msrs,
- uint32_t *err_leaf_p, uint32_t *err_subleaf_p,
- uint32_t *err_msr_p);
-
-uint32_t xc_get_cpu_featureset_size(void);
-
-enum xc_static_cpu_featuremask {
- XC_FEATUREMASK_KNOWN,
- XC_FEATUREMASK_SPECIAL,
- XC_FEATUREMASK_PV_MAX,
- XC_FEATUREMASK_PV_DEF,
- XC_FEATUREMASK_HVM_SHADOW_MAX,
- XC_FEATUREMASK_HVM_SHADOW_DEF,
- XC_FEATUREMASK_HVM_HAP_MAX,
- XC_FEATUREMASK_HVM_HAP_DEF,
-};
-const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
-
-#endif
-
-int xc_livepatch_upload(xc_interface *xch,
- char *name, unsigned char *payload, uint32_t size);
-
-int xc_livepatch_get(xc_interface *xch,
- char *name,
- xen_livepatch_status_t *status);
-
-/*
- * Get a number of available payloads and get actual total size of
- * the payloads' name and metadata arrays.
- *
- * This functions is typically executed first before the xc_livepatch_list()
- * to obtain the sizes and correctly allocate all necessary data resources.
- *
- * The return value is zero if the hypercall completed successfully.
- *
- * If there was an error performing the sysctl operation, the return value
- * will contain the hypercall error code value.
- */
-int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
- uint32_t *name_total_size,
- uint32_t *metadata_total_size);
-
-/*
- * The heart of this function is to get an array of the following objects:
- * - xen_livepatch_status_t: states and return codes of payloads
- * - name: names of payloads
- * - len: lengths of corresponding payloads' names
- * - metadata: payloads' metadata
- * - metadata_len: lengths of corresponding payloads' metadata
- *
- * However it is complex because it has to deal with the hypervisor
- * returning some of the requested data or data being stale
- * (another hypercall might alter the list).
- *
- * The parameters that the function expects to contain data from
- * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
- * 'left' are also updated with the number of entries filled out
- * and respectively the number of entries left to get from hypervisor.
- *
- * It is expected that the caller of this function will first issue the
- * xc_livepatch_list_get_sizes() in order to obtain total sizes of names
- * and all metadata as well as the current number of payload entries.
- * The total sizes are required and supplied via the 'name_total_size' and
- * 'metadata_total_size' parameters.
- *
- * The 'max' is to be provided by the caller with the maximum number of
- * entries that 'info', 'name', 'len', 'metadata' and 'metadata_len' arrays
- * can be filled up with.
- *
- * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
- * structure size.
- *
- * Each entry in the 'name' array may have an arbitrary size.
- *
- * Each entry in the 'len' array is expected to be of uint32_t size.
- *
- * Each entry in the 'metadata' array may have an arbitrary size.
- *
- * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
- *
- * The return value is zero if the hypercall completed successfully.
- * Note that the return value is _not_ the amount of entries filled
- * out - that is saved in 'done'.
- *
- * If there was an error performing the operation, the return value
- * will contain an negative -EXX type value. The 'done' and 'left'
- * will contain the number of entries that had been succesfully
- * retrieved (if any).
- */
-int xc_livepatch_list(xc_interface *xch, const unsigned int max,
- const unsigned int start,
- struct xen_livepatch_status *info,
- char *name, uint32_t *len,
- const uint32_t name_total_size,
- char *metadata, uint32_t *metadata_len,
- const uint32_t metadata_total_size,
- unsigned int *done, unsigned int *left);
-
-/*
- * The operations are asynchronous and the hypervisor may take a while
- * to complete them. The `timeout` offers an option to expire the
- * operation if it could not be completed within the specified time
- * (in ns). Value of 0 means let hypervisor decide the best timeout.
- * The `flags` allows to pass extra parameters to the actions.
- */
-int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
-int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
-int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
-int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
-
-/*
- * Ensure cache coherency after memory modifications. A call to this function
- * is only required on ARM as the x86 architecture provides cache coherency
- * guarantees. Calling this function on x86 is allowed but has no effect.
- */
-int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
- xen_pfn_t start_pfn, xen_pfn_t nr_pfns);
-
-/* Compat shims */
-#include "xenctrl_compat.h"
-
-#endif /* XENCTRL_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/*
- * Compat shims for use of 3rd party consumers of libxenctrl
- * functionality which has been split into separate libraries.
- *
- * New code should use the separate libraries.
- *
- * Each interface must be opted-into separately by defining:
- *
- * XC_WANT_COMPAT_EVTCHN_API
- * - Functions relating to /dev/xen/evtchn
- */
-#ifndef XENCTRL_COMPAT_H
-#define XENCTRL_COMPAT_H
-
-#ifdef XC_WANT_COMPAT_MAP_FOREIGN_API
-/**
- * Memory maps a range within one domain to a local address range. Mappings
- * should be unmapped with munmap and should follow the same rules as mmap
- * regarding page alignment. Returns NULL on failure.
- *
- * @parm xch a handle on an open hypervisor interface
- * @parm dom the domain to map memory from
- * @parm size the amount of memory to map (in multiples of page size)
- * @parm prot same flag as in mmap().
- * @parm mfn the frame address to map.
- */
-void *xc_map_foreign_range(xc_interface *xch, uint32_t dom,
- int size, int prot,
- unsigned long mfn );
-
-void *xc_map_foreign_pages(xc_interface *xch, uint32_t dom, int prot,
- const xen_pfn_t *arr, int num );
-
-/* Nothing within the library itself other than the compat wrapper
- * itself should be using this, everything inside has access to
- * xenforeignmemory_map().
- */
-#if !defined(XC_INTERNAL_COMPAT_MAP_FOREIGN_API) || \
- defined(XC_BUILDING_COMPAT_MAP_FOREIGN_API)
-/**
- * Like xc_map_foreign_pages(), except it can succeed partially.
- * When a page cannot be mapped, its respective field in @err is
- * set to the corresponding errno value.
- */
-void *xc_map_foreign_bulk(xc_interface *xch, uint32_t dom, int prot,
- const xen_pfn_t *arr, int *err, unsigned int num);
-#endif
-
-#endif
-
-#ifdef XC_WANT_COMPAT_EVTCHN_API
-
-typedef struct xenevtchn_handle xc_evtchn;
-typedef xc_evtchn_port_or_error_t evtchn_port_or_error_t;
-
-xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
- unsigned open_flags);
-int xc_evtchn_close(xc_evtchn *xce);
-int xc_evtchn_fd(xc_evtchn *xce);
-int xc_evtchn_notify(xc_evtchn *xce, evtchn_port_t port);
-xc_evtchn_port_or_error_t
-xc_evtchn_bind_unbound_port(xc_evtchn *xce, uint32_t domid);
-xc_evtchn_port_or_error_t
-xc_evtchn_bind_interdomain(xc_evtchn *xce, uint32_t domid,
- evtchn_port_t remote_port);
-xc_evtchn_port_or_error_t
-xc_evtchn_bind_virq(xc_evtchn *xce, unsigned int virq);
-int xc_evtchn_unbind(xc_evtchn *xce, evtchn_port_t port);
-xc_evtchn_port_or_error_t
-xc_evtchn_pending(xc_evtchn *xce);
-int xc_evtchn_unmask(xc_evtchn *xce, evtchn_port_t port);
-
-#endif /* XC_WANT_COMPAT_EVTCHN_API */
-
-#ifdef XC_WANT_COMPAT_GNTTAB_API
-
-typedef struct xengntdev_handle xc_gnttab;
-
-xc_gnttab *xc_gnttab_open(xentoollog_logger *logger,
- unsigned open_flags);
-int xc_gnttab_close(xc_gnttab *xcg);
-void *xc_gnttab_map_grant_ref(xc_gnttab *xcg,
- uint32_t domid,
- uint32_t ref,
- int prot);
-void *xc_gnttab_map_grant_refs(xc_gnttab *xcg,
- uint32_t count,
- uint32_t *domids,
- uint32_t *refs,
- int prot);
-void *xc_gnttab_map_domain_grant_refs(xc_gnttab *xcg,
- uint32_t count,
- uint32_t domid,
- uint32_t *refs,
- int prot);
-void *xc_gnttab_map_grant_ref_notify(xc_gnttab *xcg,
- uint32_t domid,
- uint32_t ref,
- int prot,
- uint32_t notify_offset,
- evtchn_port_t notify_port);
-int xc_gnttab_munmap(xc_gnttab *xcg,
- void *start_address,
- uint32_t count);
-int xc_gnttab_set_max_grants(xc_gnttab *xcg,
- uint32_t count);
-
-typedef struct xengntdev_handle xc_gntshr;
-
-xc_gntshr *xc_gntshr_open(xentoollog_logger *logger,
- unsigned open_flags);
-int xc_gntshr_close(xc_gntshr *xcg);
-void *xc_gntshr_share_pages(xc_gntshr *xcg, uint32_t domid,
- int count, uint32_t *refs, int writable);
-void *xc_gntshr_share_page_notify(xc_gntshr *xcg, uint32_t domid,
- uint32_t *ref, int writable,
- uint32_t notify_offset,
- evtchn_port_t notify_port);
-int xc_gntshr_munmap(xc_gntshr *xcg, void *start_address, uint32_t count);
-
-#endif /* XC_WANT_COMPAT_GNTTAB_API */
-
-#ifdef XC_WANT_COMPAT_DEVICEMODEL_API
-
-int xc_hvm_create_ioreq_server(
- xc_interface *xch, uint32_t domid, int handle_bufioreq,
- ioservid_t *id);
-int xc_hvm_get_ioreq_server_info(
- xc_interface *xch, uint32_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
- xen_pfn_t *bufioreq_pfn, evtchn_port_t *bufioreq_port);
-int xc_hvm_map_io_range_to_ioreq_server(
- xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
- uint64_t start, uint64_t end);
-int xc_hvm_unmap_io_range_from_ioreq_server(
- xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
- uint64_t start, uint64_t end);
-int xc_hvm_map_pcidev_to_ioreq_server(
- xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
- uint8_t bus, uint8_t device, uint8_t function);
-int xc_hvm_unmap_pcidev_from_ioreq_server(
- xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
- uint8_t bus, uint8_t device, uint8_t function);
-int xc_hvm_destroy_ioreq_server(
- xc_interface *xch, uint32_t domid, ioservid_t id);
-int xc_hvm_set_ioreq_server_state(
- xc_interface *xch, uint32_t domid, ioservid_t id, int enabled);
-int xc_hvm_set_pci_intx_level(
- xc_interface *xch, uint32_t domid, uint16_t segment, uint8_t bus,
- uint8_t device, uint8_t intx, unsigned int level);
-int xc_hvm_set_isa_irq_level(
- xc_interface *xch, uint32_t domid, uint8_t irq, unsigned int level);
-int xc_hvm_set_pci_link_route(
- xc_interface *xch, uint32_t domid, uint8_t link, uint8_t irq);
-int xc_hvm_inject_msi(
- xc_interface *xch, uint32_t domid, uint64_t msi_addr, uint32_t msi_data);
-int xc_hvm_track_dirty_vram(
- xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr,
- unsigned long *dirty_bitmap);
-int xc_hvm_modified_memory(
- xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr);
-int xc_hvm_set_mem_type(
- xc_interface *xch, uint32_t domid, hvmmem_type_t type,
- uint64_t first_pfn, uint32_t nr);
-int xc_hvm_inject_trap(
- xc_interface *xch, uint32_t domid, int vcpu, uint8_t vector,
- uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2);
-int xc_domain_pin_memory_cacheattr(
- xc_interface *xch, uint32_t domid, uint64_t start, uint64_t end,
- uint32_t type);
-
-#endif /* XC_WANT_COMPAT_DEVICEMODEL_API */
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
SRCS-$(CONFIG_MiniOS) += compat.c
include $(XEN_ROOT)/tools/libs/libs.mk
-
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxendevicemodel)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
+++ /dev/null
-/*
- * Copyright (c) 2017 Citrix Systems Inc.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef XENDEVICEMODEL_H
-#define XENDEVICEMODEL_H
-
-#ifdef __XEN_TOOLS__
-
-#include <stdint.h>
-
-#include <xen/xen.h>
-#include <xen/hvm/dm_op.h>
-#include <xen/hvm/hvm_op.h>
-
-/* Callers who don't care don't need to #include <xentoollog.h> */
-struct xentoollog_logger;
-
-typedef struct xendevicemodel_handle xendevicemodel_handle;
-
-xendevicemodel_handle *xendevicemodel_open(struct xentoollog_logger *logger,
- unsigned int open_flags);
-
-int xendevicemodel_close(xendevicemodel_handle *dmod);
-
-/*
- * IOREQ Server API. (See section on IOREQ Servers in public/hvm_op.h).
- */
-
-/**
- * This function instantiates an IOREQ Server.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm handle_bufioreq how should the IOREQ Server handle buffered
- * requests (HVM_IOREQSRV_BUFIOREQ_*)?
- * @parm id pointer to an ioservid_t to receive the IOREQ Server id.
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_create_ioreq_server(
- xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
- ioservid_t *id);
-
-/**
- * This function retrieves the necessary information to allow an
- * emulator to use an IOREQ Server.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm id the IOREQ Server id.
- * @parm ioreq_gfn pointer to a xen_pfn_t to receive the synchronous ioreq
- * gfn. (May be NULL if not required)
- * @parm bufioreq_gfn pointer to a xen_pfn_t to receive the buffered ioreq
- * gfn. (May be NULL if not required)
- * @parm bufioreq_port pointer to a evtchn_port_t to receive the buffered
- * ioreq event channel. (May be NULL if not required)
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_get_ioreq_server_info(
- xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
- xen_pfn_t *ioreq_gfn, xen_pfn_t *bufioreq_gfn,
- evtchn_port_t *bufioreq_port);
-
-/**
- * This function registers a range of memory or I/O ports for emulation.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm id the IOREQ Server id.
- * @parm is_mmio is this a range of ports or memory
- * @parm start start of range
- * @parm end end of range (inclusive).
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_map_io_range_to_ioreq_server(
- xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
- uint64_t start, uint64_t end);
-
-/**
- * This function deregisters a range of memory or I/O ports for emulation.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm id the IOREQ Server id.
- * @parm is_mmio is this a range of ports or memory
- * @parm start start of range
- * @parm end end of range (inclusive).
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_unmap_io_range_from_ioreq_server(
- xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
- uint64_t start, uint64_t end);
-
-/**
- * This function registers/deregisters a memory type for emulation.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced.
- * @parm id the IOREQ Server id.
- * @parm type the memory type to be emulated. For now, only HVMMEM_ioreq_server
- * is supported, and in the future new types can be introduced, e.g.
- * HVMMEM_ioreq_serverX mapped to ioreq server X.
- * @parm flags operations to be emulated; 0 for unmap. For now, only write
- * operations will be emulated and can be extended to emulate
- * read ones in the future.
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_map_mem_type_to_ioreq_server(
- xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, uint16_t type,
- uint32_t flags);
-
-/**
- * This function registers a PCI device for config space emulation.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm id the IOREQ Server id.
- * @parm segment the PCI segment of the device
- * @parm bus the PCI bus of the device
- * @parm device the 'slot' number of the device
- * @parm function the function number of the device
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_map_pcidev_to_ioreq_server(
- xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
- uint16_t segment, uint8_t bus, uint8_t device, uint8_t function);
-
-/**
- * This function deregisters a PCI device for config space emulation.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm id the IOREQ Server id.
- * @parm segment the PCI segment of the device
- * @parm bus the PCI bus of the device
- * @parm device the 'slot' number of the device
- * @parm function the function number of the device
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_unmap_pcidev_from_ioreq_server(
- xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
- uint16_t segment, uint8_t bus, uint8_t device, uint8_t function);
-
-/**
- * This function destroys an IOREQ Server.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm id the IOREQ Server id.
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_destroy_ioreq_server(
- xendevicemodel_handle *dmod, domid_t domid, ioservid_t id);
-
-/**
- * This function sets IOREQ Server state. An IOREQ Server
- * will not be passed emulation requests until it is in
- * the enabled state.
- * Note that the contents of the ioreq_gfn and bufioreq_gfn are
- * not meaningful until the IOREQ Server is in the enabled state.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm id the IOREQ Server id.
- * @parm enabled the state.
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_set_ioreq_server_state(
- xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled);
-
-/**
- * This function sets the level of INTx pin of an emulated PCI device.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm segment the PCI segment number of the emulated device
- * @parm bus the PCI bus number of the emulated device
- * @parm device the PCI device number of the emulated device
- * @parm intx the INTx pin to modify (0 => A .. 3 => D)
- * @parm level the level (1 for asserted, 0 for de-asserted)
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_set_pci_intx_level(
- xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
- uint8_t bus, uint8_t device, uint8_t intx, unsigned int level);
-
-/**
- * This function sets the level of an ISA IRQ line.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm irq the IRQ number (0 - 15)
- * @parm level the level (1 for asserted, 0 for de-asserted)
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_set_isa_irq_level(
- xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
- unsigned int level);
-
-/**
- * This function maps a PCI INTx line to a an IRQ line.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm line the INTx line (0 => A .. 3 => B)
- * @parm irq the IRQ number (0 - 15)
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_set_pci_link_route(
- xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq);
-
-/**
- * This function injects an MSI into a guest.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm msi_addr the MSI address (0xfeexxxxx)
- * @parm msi_data the MSI data
- * @return 0 on success, -1 on failure.
-*/
-int xendevicemodel_inject_msi(
- xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
- uint32_t msi_data);
-
-/**
- * This function enables tracking of changes in the VRAM area.
- *
- * The following is done atomically:
- * - get the dirty bitmap since the last call.
- * - set up dirty tracking area for period up to the next call.
- * - clear the dirty tracking area.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm first_pfn the start of the area to track
- * @parm nr the number of pages to track
- * @parm dirty_bitmal a pointer to the bitmap to be updated
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_track_dirty_vram(
- xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
- uint32_t nr, unsigned long *dirty_bitmap);
-
-/**
- * This function notifies the hypervisor that a set of contiguous
- * domain pages have been modified.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm first_pfn the start of the modified area
- * @parm nr the number of pages modified
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_modified_memory(
- xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
- uint32_t nr);
-
-/**
- * This function notifies the hypervisor that a set of discontiguous
- * domain pages have been modified.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm extents an array of extent structs, which each hold
- a start_pfn and nr (number of pfns).
- * @parm nr the number of extents in the array
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_modified_memory_bulk(
- xendevicemodel_handle *dmod, domid_t domid,
- struct xen_dm_op_modified_memory_extent extents[], uint32_t nr);
-
-/**
- * This function notifies the hypervisor that a set of domain pages
- * are to be treated in a specific way. (See the definition of
- * hvmmem_type_t).
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm mem_type determines how the set is to be treated
- * @parm first_pfn the start of the set
- * @parm nr the number of pages in the set
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_set_mem_type(
- xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
- uint64_t first_pfn, uint32_t nr);
-
-/**
- * This function injects an event into a vCPU to take effect the next
- * time it resumes.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm vcpu the vcpu id
- * @parm vector the interrupt vector
- * @parm type the event type (see the definition of enum x86_event_type)
- * @parm error_code the error code or ~0 to skip
- * @parm insn_len the instruction length
- * @parm extra type-specific extra data (%cr2 for #PF, pending_dbg for #DB)
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_inject_event(
- xendevicemodel_handle *dmod, domid_t domid, int vcpu, uint8_t vector,
- uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t extra);
-
-/**
- * Shuts the domain down.
- *
- * @parm reason usually enum sched_shutdown_reason, see xen/sched.h
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_shutdown(
- xendevicemodel_handle *dmod, domid_t domid, unsigned int reason);
-
-/*
- * Relocate GFNs for the specified domain.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm size Number of GFNs to process
- * @parm src_gfn Starting GFN to relocate
- * @parm dst_gfn Starting GFN where GFNs should be relocated
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_relocate_memory(
- xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
- uint64_t dst_gfn);
-
-/**
- * Pins caching type of RAM space.
- *
- * @parm dmod a handle to an open devicemodel interface.
- * @parm domid the domain id to be serviced
- * @parm start Start gfn
- * @parm end End gfn
- * @parm type XEN_DMOP_MEM_CACHEATTR_*
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_pin_memory_cacheattr(
- xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
- uint32_t type);
-
-/**
- * This function restricts the use of this handle to the specified
- * domain.
- *
- * @parm dmod handle to the open devicemodel interface
- * @parm domid the domain id
- * @return 0 on success, -1 on failure.
- */
-int xendevicemodel_restrict(xendevicemodel_handle *dmod, domid_t domid);
-
-#endif /* __XEN_TOOLS__ */
-
-#endif /* XENDEVICEMODEL_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
SRCS-$(CONFIG_MiniOS) += minios.c
include $(XEN_ROOT)/tools/libs/libs.mk
-
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenevtchn)/include
+++ /dev/null
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Split off from:
- * xenctrl.h
- *
- * A library for low-level access to the Xen control interfaces.
- *
- * Copyright (c) 2003-2004, K A Fraser.
- */
-
-#ifndef XENEVTCHN_H
-#define XENEVTCHN_H
-
-#include <stdint.h>
-
-#include <xen/event_channel.h>
-
-/* A port identifier is guaranteed to fit in 31 bits. */
-typedef int xenevtchn_port_or_error_t;
-
-typedef struct xenevtchn_handle xenevtchn_handle;
-
-/* Callers who don't care don't need to #include <xentoollog.h> */
-struct xentoollog_logger;
-
-/*
- * EVENT CHANNEL FUNCTIONS
- *
- * None of these do any logging.
- */
-
-/*
- * Return a handle to the event channel driver, or NULL on failure, in
- * which case errno will be set appropriately.
- *
- * Note: After fork(2) a child process must not use any opened evtchn
- * handle inherited from their parent, nor access any grant mapped
- * areas associated with that handle.
- *
- * The child must open a new handle if they want to interact with
- * evtchn.
- *
- * Calling exec(2) in a child will safely (and reliably) reclaim any
- * allocated resources via a xenevtchn_handle in the parent.
- *
- * A child which does not call exec(2) may safely call
- * xenevtchn_close() on a xenevtchn_handle inherited from their
- * parent. This will attempt to reclaim any resources associated with
- * that handle. Note that in some implementations this reclamation may
- * not be completely effective, in this case any affected resources
- * remain allocated.
- *
- * Calling xenevtchn_close() is the only safe operation on a
- * xenevtchn_handle which has been inherited.
- */
-/* Currently no flags are defined */
-xenevtchn_handle *xenevtchn_open(struct xentoollog_logger *logger,
- unsigned open_flags);
-
-/*
- * Close a handle previously allocated with xenevtchn_open().
- */
-int xenevtchn_close(xenevtchn_handle *xce);
-
-/*
- * Return an fd that can be select()ed on.
- *
- * Note that due to bugs, setting this fd to non blocking may not
- * work: you would hope that it would result in xenevtchn_pending
- * failing with EWOULDBLOCK if there are no events signaled, but in
- * fact it may block. (Bug is present in at least Linux 3.12, and
- * perhaps on other platforms or later version.)
- *
- * To be safe, you must use poll() or select() before each call to
- * xenevtchn_pending. If you have multiple threads (or processes)
- * sharing a single xce handle this will not work, and there is no
- * straightforward workaround. Please design your program some other
- * way.
- */
-int xenevtchn_fd(xenevtchn_handle *xce);
-
-/*
- * Notify the given event channel. Returns -1 on failure, in which case
- * errno will be set appropriately.
- */
-int xenevtchn_notify(xenevtchn_handle *xce, evtchn_port_t port);
-
-/*
- * Returns a new event port awaiting interdomain connection from the given
- * domain ID, or -1 on failure, in which case errno will be set appropriately.
- */
-xenevtchn_port_or_error_t
-xenevtchn_bind_unbound_port(xenevtchn_handle *xce, uint32_t domid);
-
-/*
- * Returns a new event port bound to the remote port for the given domain ID,
- * or -1 on failure, in which case errno will be set appropriately.
- */
-xenevtchn_port_or_error_t
-xenevtchn_bind_interdomain(xenevtchn_handle *xce, uint32_t domid,
- evtchn_port_t remote_port);
-
-/*
- * Bind an event channel to the given VIRQ. Returns the event channel bound to
- * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
- */
-xenevtchn_port_or_error_t
-xenevtchn_bind_virq(xenevtchn_handle *xce, unsigned int virq);
-
-/*
- * Unbind the given event channel. Returns -1 on failure, in which case errno
- * will be set appropriately.
- */
-int xenevtchn_unbind(xenevtchn_handle *xce, evtchn_port_t port);
-
-/*
- * Return the next event channel to become pending, or -1 on failure, in which
- * case errno will be set appropriately.
- *
- * At the hypervisor level the event channel will have been masked,
- * and then cleared, by the underlying machinery (evtchn kernel
- * driver, or equivalent). So if the event channel is signaled again
- * after it is returned here, it will be queued up, and delivered
- * again after you unmask it. (See the documentation in the Xen
- * public header event_channel.h.)
- *
- * On receiving the notification from xenevtchn_pending, you should
- * normally: check (by other means) what work needs doing; do the
- * necessary work (if any); unmask the event channel with
- * xenevtchn_unmask (if you want to receive any further
- * notifications).
- */
-xenevtchn_port_or_error_t
-xenevtchn_pending(xenevtchn_handle *xce);
-
-/*
- * Unmask the given event channel. Returns -1 on failure, in which case errno
- * will be set appropriately.
- */
-int xenevtchn_unmask(xenevtchn_handle *xce, evtchn_port_t port);
-
-/**
- * This function restricts the use of this handle to the specified
- * domain.
- *
- * @parm xce handle to the open evtchn interface
- * @parm domid the domain id
- * @return 0 on success, -1 on failure with errno set appropriately.
- */
-int xenevtchn_restrict(xenevtchn_handle *xce, domid_t domid);
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
SRCS-$(CONFIG_MiniOS) += minios.c
include $(XEN_ROOT)/tools/libs/libs.mk
-
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenforeignmemory)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
+++ /dev/null
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef XENFOREIGNMEMORY_H
-#define XENFOREIGNMEMORY_H
-
-/*
- * This library allows you to map foreign domain memory, subject to
- * permissions for both the process and the domain in which the
- * process runs.
- */
-
-#include <stdint.h>
-#include <stddef.h>
-
-#include <xen/xen.h>
-
-/* Callers who don't care don't need to #include <xentoollog.h> */
-struct xentoollog_logger;
-
-typedef struct xenforeignmemory_handle xenforeignmemory_handle;
-
-/*
- * Return a handle onto the foreign memory mapping driver. Logs errors.
- *
- * Note: After fork(2) a child process must not use any opened
- * foreignmemory handle inherited from their parent, nor access any
- * grant mapped areas associated with that handle.
- *
- * The child must open a new handle if they want to interact with
- * foreignmemory.
- *
- * Calling exec(2) in a child will safely (and reliably) reclaim any
- * resources which were allocated via a xenforeignmemory_handle in the
- * parent.
- *
- * A child which does not call exec(2) may safely call
- * xenforeignmemory_close() on a xenforeignmemory_handle inherited
- * from their parent. This will attempt to reclaim any resources
- * associated with that handle. Note that in some implementations this
- * reclamation may not be completely effective, in this case any
- * affected resources remain allocated.
- *
- * Calling xenforeignmemory_close() is the only safe operation on a
- * xenforeignmemory_handle which has been inherited.
- */
-xenforeignmemory_handle *xenforeignmemory_open(struct xentoollog_logger *logger,
- unsigned open_flags);
-
-/*
- * Close a handle previously allocated with xenforeignmemory_open().
- *
- * Under normal circumstances (i.e. not in the child after a fork)
- * xenforeignmemory_unmap() should be used on all mappings allocated
- * by xenforeignmemory_map() prior to closing the handle in order to
- * free up resources associated with those mappings.
- *
- * This is the only function which may be safely called on a
- * xenforeignmemory_handle in a child after a
- * fork. xenforeignmemory_unmap() must not be called under such
- * circumstances.
- */
-int xenforeignmemory_close(xenforeignmemory_handle *fmem);
-
-/*
- * Maps a range within one domain to a local address range. Mappings
- * must be unmapped with xenforeignmemory_unmap and should follow the
- * same rules as mmap regarding page alignment.
- *
- * prot is as for mmap(2).
- *
- * @arr is an array of @pages gfns to be mapped linearly in the local
- * address range. @err is an (optional) output array used to report
- * per-page errors, as errno values.
- *
- * If @err is given (is non-NULL) then the mapping may partially
- * succeed and return a valid pointer while also using @err to
- * indicate the success (0) or failure (errno value) of the individual
- * pages. The global errno thread local variable is not valid in this
- * case.
- *
- * If @err is not given (is NULL) then on failure to map any page any
- * successful mappings will be undone and NULL will be returned. errno
- * will be set to correspond to the first failure (which may not be
- * the most critical).
- *
- * It is also possible to return NULL due to a complete failure,
- * i.e. failure to even attempt the mapping, in this case the global
- * errno will have been set and the contents of @err (if given) is
- * invalid.
- *
- * Note that it is also possible to return non-NULL with the contents
- * of @err indicating failure to map every page.
- */
-void *xenforeignmemory_map(xenforeignmemory_handle *fmem, uint32_t dom,
- int prot, size_t pages,
- const xen_pfn_t arr[/*pages*/], int err[/*pages*/]);
-
-/*
- * Almost like the previous one but also accepts two additional parameters:
- *
- * @addr is used as a hint address for foreign map placement (see mmap(2)).
- * @flags is a set of additional flags as for mmap(2). Not all of the flag
- * combinations are possible due to implementation details on different
- * platforms.
- */
-void *xenforeignmemory_map2(xenforeignmemory_handle *fmem, uint32_t dom,
- void *addr, int prot, int flags, size_t pages,
- const xen_pfn_t arr[/*pages*/], int err[/*pages*/]);
-
-/*
- * Unmap a mapping previous created with xenforeignmemory_map().
- *
- * Returns 0 on success on failure sets errno and returns -1.
- */
-int xenforeignmemory_unmap(xenforeignmemory_handle *fmem,
- void *addr, size_t pages);
-
-/**
- * This function restricts the use of this handle to the specified
- * domain.
- *
- * @parm fmem handle to the open foreignmemory interface
- * @parm domid the domain id
- * @return 0 on success, -1 on failure.
- */
-int xenforeignmemory_restrict(xenforeignmemory_handle *fmem,
- domid_t domid);
-
-typedef struct xenforeignmemory_resource_handle xenforeignmemory_resource_handle;
-
-/**
- * This function maps a guest resource.
- *
- * @parm fmem handle to the open foreignmemory interface
- * @parm domid the domain id
- * @parm type the resource type
- * @parm id the type-specific resource identifier
- * @parm frame base frame index within the resource
- * @parm nr_frames number of frames to map
- * @parm paddr pointer to an address passed through to mmap(2)
- * @parm prot passed through to mmap(2)
- * @parm POSIX-only flags passed through to mmap(2)
- * @return pointer to foreignmemory resource handle on success, NULL on
- * failure
- *
- * *paddr is used, on entry, as a hint address for foreign map placement
- * (see mmap(2)) so should be set to NULL if no specific placement is
- * required. On return *paddr contains the address where the resource is
- * mapped.
- * As for xenforeignmemory_map2() flags is a set of additional flags
- * for mmap(2). Not all of the flag combinations are possible due to
- * implementation details on different platforms.
- */
-xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
- xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
- unsigned int id, unsigned long frame, unsigned long nr_frames,
- void **paddr, int prot, int flags);
-
-/**
- * This function releases a previously acquired resource.
- *
- * @parm fmem handle to the open foreignmemory interface
- * @parm fres handle to the acquired resource
- *
- * Returns 0 on success on failure sets errno and returns -1.
- */
-int xenforeignmemory_unmap_resource(
- xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres);
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
SRCS-$(CONFIG_NetBSD) += gnttab_unimp.c gntshr_unimp.c
include $(XEN_ROOT)/tools/libs/libs.mk
-
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxengnttab)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
+++ /dev/null
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Split off from:
- * xenctrl.h
- *
- * A library for low-level access to the Xen control interfaces.
- *
- * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
- * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc.
- */
-#ifndef XENGNTTAB_H
-#define XENGNTTAB_H
-
-#include <stdint.h>
-
-#include <xen/grant_table.h>
-#include <xen/event_channel.h>
-
-/* Callers who don't care don't need to #include <xentoollog.h> */
-struct xentoollog_logger;
-
-/*
- * PRODUCING AND CONSUMING GRANT REFERENCES
- * ========================================
- *
- * The xengnttab library contains two distinct interfaces, each with
- * their own distinct handle type and entry points. The represent the
- * two sides of the grant table interface, producer (gntshr) and
- * consumer (gnttab).
- *
- * The xengnttab_* interfaces take a xengnttab_handle and provide
- * mechanisms for consuming (i.e. mapping or copying to/from) grant
- * references provided by a peer.
- *
- * The xengntshr_* interfaces take a xengntshr_handle and provide a
- * mechanism to produce grantable memory and grant references to that
- * memory, which can be handed to some peer.
- *
- * UNMAP NOTIFICATION
- * ==================
- *
- * The xengnt{tab,shr}_*_notify interfaces implement a cooperative
- * interface which is intended to allow the underlying kernel
- * interfaces to attempt to notify the peer to perform graceful
- * teardown upon failure (i.e. crash or exit) of the process on their
- * end.
- *
- * These interfaces operate on a single page only and are intended for
- * use on the main shared-ring page of a protocol. It is assumed that
- * on teardown both ends would automatically teardown all grants
- * associated with the protocol in addition to the shared ring itself.
- *
- * Each end is able to optionally nominate a byte offset within the
- * shared page or an event channel or both. On exit of the process the
- * underlying kernel driver will zero the byte at the given offset and
- * signal the event channel.
- *
- * The event channel can be the same event channel used for regular
- * ring progress notifications, or may be a dedicated event channel.
- *
- * Both ends may share the same notification byte offset within the
- * shared page, or may have dedicated "client" and "server" status
- * bytes.
- *
- * Since the byte is cleared on shutdown the protocol must use 0 as
- * the "closed/dead" status, but is permitted to use any other non-0
- * values to indicate various other "live" states (waiting for
- * connection, connected, etc).
- *
- * Both ends are permitted to modify (including clear) their
- * respective status bytes and to signal the event channel themselves
- * from userspace.
- *
- * Depending on the mechanisms which have been registered an
- * the peer may receive a shutdown notification as:
- *
- * - An event channel notification on a dedicated event channel
- * - Observation of the other ends's status byte being cleared
- * (whether in response to an explicit notification or in the
- * course of normal operation).
- *
- * The mechanism should be defined as part of the specific ring
- * protocol.
- *
- * Upon receiving notification of the peer is expected to teardown any
- * resources (and in particular any grant mappings) in a timely
- * manner.
- *
- * NOTE: this protocol is intended to allow for better error behaviour
- * and recovery between two cooperating peers. It does not cover the
- * case of a malicious peer who may continue to hold resources open.
- */
-
-/*
- * Grant Table Interface (making use of grants from other domains)
- */
-
-typedef struct xengntdev_handle xengnttab_handle;
-
-/*
- * Returns a handle onto the grant table driver. Logs errors.
- *
- * Note: After fork(2) a child process must not use any opened gnttab
- * handle inherited from their parent, nor access any grant mapped
- * areas associated with that handle.
- *
- * The child must open a new handle if they want to interact with
- * gnttab.
- *
- * Calling exec(2) in a child will safely (and reliably) reclaim any
- * resources which were allocated via a xengnttab_handle in the parent.
- *
- * A child which does not call exec(2) may safely call
- * xengnttab_close() on a xengnttab_handle inherited from their
- * parent. This will attempt to reclaim any resources associated with
- * that handle. Note that in some implementations this reclamation may
- * not be completely effective, in this case any affected resources
- * remain allocated.
- *
- * Calling xengnttab_close() is the only safe operation on a
- * xengnttab_handle which has been inherited. xengnttab_unmap() must
- * not be called under such circumstances.
- */
-xengnttab_handle *xengnttab_open(struct xentoollog_logger *logger,
- unsigned open_flags);
-
-/*
- * Close a handle previously allocated with xengnttab_open(),
- * including unmaping any current grant maps. Never logs errors.
- *
- * Under normal circumstances (i.e. not in the child after a fork)
- * xengnttab_unmap() should be used on all mappings allocated through
- * a xengnttab_handle prior to closing the handle in order to free up
- * resources associated with those mappings.
- *
- * This is the only function which may be safely called on a
- * xengnttab_handle in a child after a fork.
- */
-int xengnttab_close(xengnttab_handle *xgt);
-
-
-/*
- * Return the fd used internally by xengnttab. selecting on it is not
- * useful. But it could be useful for unusual use cases; perhaps,
- * passing to other programs, calling ioctls on directly, or maybe
- * calling fcntl.
- */
-int xengnttab_fd(xengnttab_handle *xgt);
-
-/**
- * Memory maps a grant reference from one domain to a local address range.
- * Mappings should be unmapped with xengnttab_unmap. Logs errors.
- *
- * @parm xgt a handle on an open grant table interface
- * @parm domid the domain to map memory from
- * @parm ref the grant reference ID to map
- * @parm prot same flag as in mmap()
- */
-void *xengnttab_map_grant_ref(xengnttab_handle *xgt,
- uint32_t domid,
- uint32_t ref,
- int prot);
-
-/**
- * Memory maps one or more grant references from one or more domains to a
- * contiguous local address range. Mappings should be unmapped with
- * xengnttab_unmap. Logs errors.
- *
- * On failure (including partial failure) sets errno and returns
- * NULL. On partial failure no mappings are established (any partial
- * work is undone).
- *
- * @parm xgt a handle on an open grant table interface
- * @parm count the number of grant references to be mapped
- * @parm domids an array of @count domain IDs by which the corresponding @refs
- * were granted
- * @parm refs an array of @count grant references to be mapped
- * @parm prot same flag as in mmap()
- */
-void *xengnttab_map_grant_refs(xengnttab_handle *xgt,
- uint32_t count,
- uint32_t *domids,
- uint32_t *refs,
- int prot);
-
-/**
- * Memory maps one or more grant references from one domain to a
- * contiguous local address range. Mappings should be unmapped with
- * xengnttab_unmap. Logs errors.
- *
- * This call is equivalent to calling @xengnttab_map_grant_refs with a
- * @domids array with every entry set to @domid.
- *
- * @parm xgt a handle on an open grant table interface
- * @parm count the number of grant references to be mapped
- * @parm domid the domain to map memory from
- * @parm refs an array of @count grant references to be mapped
- * @parm prot same flag as in mmap()
- */
-void *xengnttab_map_domain_grant_refs(xengnttab_handle *xgt,
- uint32_t count,
- uint32_t domid,
- uint32_t *refs,
- int prot);
-
-/**
- * Memory maps a grant reference from one domain to a local address range.
- * Mappings should be unmapped with xengnttab_unmap. If notify_offset or
- * notify_port are not -1, this version will attempt to set up an unmap
- * notification at the given offset and event channel. When the page is
- * unmapped, the byte at the given offset will be zeroed and a wakeup will be
- * sent to the given event channel. Logs errors.
- *
- * On failure sets errno and returns NULL.
- *
- * If notify_offset or notify_port are requested and cannot be set up
- * an error will be returned and no mapping will be made.
- *
- * @parm xgt a handle on an open grant table interface
- * @parm domid the domain to map memory from
- * @parm ref the grant reference ID to map
- * @parm prot same flag as in mmap()
- * @parm notify_offset The byte offset in the page to use for unmap
- * notification; -1 for none.
- * @parm notify_port The event channel port to use for unmap notify, or -1
- */
-void *xengnttab_map_grant_ref_notify(xengnttab_handle *xgt,
- uint32_t domid,
- uint32_t ref,
- int prot,
- uint32_t notify_offset,
- evtchn_port_t notify_port);
-
-/**
- * Unmaps the @count pages starting at @start_address, which were
- * mapped by a call to xengnttab_map_grant_ref,
- * xengnttab_map_grant_refs or xengnttab_map_grant_ref_notify. Never
- * logs.
- *
- * If the mapping was made using xengnttab_map_grant_ref_notify() with
- * either notify_offset or notify_port then the peer will be notified.
- */
-int xengnttab_unmap(xengnttab_handle *xgt, void *start_address, uint32_t count);
-
-/**
- * Sets the maximum number of grants that may be mapped by the given
- * instance to @count. Never logs.
- *
- * N.B. This function must be called after opening the handle, and before any
- * other functions are invoked on it.
- *
- * N.B. When variable-length grants are mapped, fragmentation may be observed,
- * and it may not be possible to satisfy requests up to the maximum number
- * of grants.
- */
-int xengnttab_set_max_grants(xengnttab_handle *xgt,
- uint32_t nr_grants);
-
-struct xengnttab_grant_copy_segment {
- union xengnttab_copy_ptr {
- void *virt;
- struct {
- uint32_t ref;
- uint16_t offset;
- uint16_t domid;
- } foreign;
- } source, dest;
- uint16_t len;
- uint16_t flags;
- int16_t status;
-};
-
-typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;
-
-/**
- * Copy memory from or to grant references. The information of each operations
- * are contained in 'xengnttab_grant_copy_segment_t'. The @flag value indicate
- * the direction of an operation (GNTCOPY_source_gref\GNTCOPY_dest_gref).
- *
- * For each segment, @virt may cross a page boundary but @offset + @len
- * must not exceed XEN_PAGE_SIZE.
- */
-int xengnttab_grant_copy(xengnttab_handle *xgt,
- uint32_t count,
- xengnttab_grant_copy_segment_t *segs);
-
-/*
- * Flags to be used while requesting memory mapping's backing storage
- * to be allocated with DMA API.
- */
-
-/*
- * The buffer is backed with memory allocated with dma_alloc_wc.
- */
-#define GNTDEV_DMA_FLAG_WC (1 << 0)
-
-/*
- * The buffer is backed with memory allocated with dma_alloc_coherent.
- */
-#define GNTDEV_DMA_FLAG_COHERENT (1 << 1)
-
-/**
- * Create a dma-buf [1] from grant references @refs of count @count provided
- * by the foreign domain @domid with flags @flags.
- *
- * By default dma-buf is backed by system memory pages, but by providing
- * one of the GNTDEV_DMA_FLAG_XXX flags it can also be created as
- * a DMA write-combine or coherent buffer.
- *
- * Returns 0 if dma-buf was successfully created and the corresponding
- * dma-buf's file descriptor is returned in @fd.
- *
- * [1] https://elixir.bootlin.com/linux/latest/source/Documentation/driver-api/dma-buf.rst
- */
-int xengnttab_dmabuf_exp_from_refs(xengnttab_handle *xgt, uint32_t domid,
- uint32_t flags, uint32_t count,
- const uint32_t *refs, uint32_t *fd);
-
-/*
- * This will block until the dma-buf with the file descriptor @fd is
- * released. This is only valid for buffers created with
- * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS.
- *
- * If withing @wait_to_ms milliseconds the buffer is not released
- * then -ETIMEDOUT error is returned.
- * If the buffer with file descriptor @fd does not exist or has already
- * been released, then -ENOENT is returned. For valid file descriptors
- * this must not be treated as error.
- */
-int xengnttab_dmabuf_exp_wait_released(xengnttab_handle *xgt, uint32_t fd,
- uint32_t wait_to_ms);
-
-/*
- * Import a dma-buf with file descriptor @fd and export granted references
- * to the pages of that dma-buf into array @refs of size @count.
- */
-int xengnttab_dmabuf_imp_to_refs(xengnttab_handle *xgt, uint32_t domid,
- uint32_t fd, uint32_t count, uint32_t *refs);
-
-/*
- * This will close all references to an imported buffer, so it can be
- * released by the owner. This is only valid for buffers created with
- * IOCTL_GNTDEV_DMABUF_IMP_TO_REFS.
- */
-int xengnttab_dmabuf_imp_release(xengnttab_handle *xgt, uint32_t fd);
-
-/*
- * Grant Sharing Interface (allocating and granting pages to others)
- */
-
-typedef struct xengntdev_handle xengntshr_handle;
-
-/*
- * Returns a handle onto the grant sharing driver. Logs errors.
- *
- * Note: After fork(2) a child process must not use any opened gntshr
- * handle inherited from their parent, nor access any grant mapped
- * areas associated with that handle.
- *
- * The child must open a new handle if they want to interact with
- * gntshr.
- *
- * Calling exec(2) in a child will safely (and reliably) reclaim any
- * resources which were allocated via a xengntshr_handle in the
- * parent.
- *
- * A child which does not call exec(2) may safely call
- * xengntshr_close() on a xengntshr_handle inherited from their
- * parent. This will attempt to reclaim any resources associated with
- * that handle. Note that in some implementations this reclamation may
- * not be completely effective, in this case any affected resources
- * remain allocated.
- *
- * Calling xengntshr_close() is the only safe operation on a
- * xengntshr_handle which has been inherited.
- */
-xengntshr_handle *xengntshr_open(struct xentoollog_logger *logger,
- unsigned open_flags);
-
-/*
- * Close a handle previously allocated with xengntshr_open().
- * Never logs errors.
- *
- * Under normal circumstances (i.e. not in the child after a fork)
- * xengntshr_unmap() should be used on all mappings allocated through
- * a xengnttab_handle prior to closing the handle in order to free up
- * resources associated with those mappings.
- *
- * xengntshr_close() is the only function which may be safely called
- * on a xengntshr_handle in a child after a fork. xengntshr_unshare()
- * must not be called under such circumstances.
- */
-int xengntshr_close(xengntshr_handle *xgs);
-
-/*
- * Return the fd used internally by xengntshr. selecting on it is not
- * useful. But it could be useful for unusual use cases; perhaps,
- * passing to other programs, calling ioctls on directly, or maybe
- * calling fcntl.
- */
-int xengntshr_fd(xengntshr_handle *xgs);
-
-/**
- * Allocates and shares pages with another domain.
- *
- * On failure sets errno and returns NULL. No allocations will be made.
- *
- * This library only provides functionality for sharing memory
- * allocated via this call, memory from elsewhere (malloc, mmap etc)
- * cannot be shared here.
- *
- * @parm xgs a handle to an open grant sharing instance
- * @parm domid the domain to share memory with
- * @parm count the number of pages to share
- * @parm refs the grant references of the pages (output)
- * @parm writable true if the other domain can write to the pages
- * @return local mapping of the pages
- */
-void *xengntshr_share_pages(xengntshr_handle *xgs, uint32_t domid,
- int count, uint32_t *refs, int writable);
-
-/**
- * Creates and shares a page with another domain, with unmap notification.
- *
- * @parm xgs a handle to an open grant sharing instance
- * @parm domid the domain to share memory with
- * @parm refs the grant reference of the pages (output)
- * @parm writable true if the other domain can write to the page
- * @parm notify_offset The byte offset in the page to use for unmap
- * notification; -1 for none.
- * @parm notify_port The event channel port to use for unmap notify, or -1
- * @return local mapping of the page
- */
-void *xengntshr_share_page_notify(xengntshr_handle *xgs, uint32_t domid,
- uint32_t *ref, int writable,
- uint32_t notify_offset,
- evtchn_port_t notify_port);
-
-/**
- * Unmaps the @count pages starting at @start_address, which were
- * mapped by a call to xengntshr_share_*. Never logs.
- *
- * If the mapping was made using xengntshr_share_page_notify() with
- * either notify_offset or notify_port then the peer will be notified.
- */
-int xengntshr_unshare(xengntshr_handle *xgs, void *start_address, uint32_t count);
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
$(LIB_OBJS) $(PIC_OBJS): $(LINK_FILES)
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenctrl)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
-
.PHONY: cleanlocal
cleanlocal:
rm -f libxenguest.map
+++ /dev/null
-/******************************************************************************
- * xenguest.h
- *
- * A library for guest domain management in Xen.
- *
- * Copyright (c) 2003-2004, K A Fraser.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef XENGUEST_H
-#define XENGUEST_H
-
-#define XC_NUMA_NO_NODE (~0U)
-
-#define XCFLAGS_LIVE (1 << 0)
-#define XCFLAGS_DEBUG (1 << 1)
-
-#define X86_64_B_SIZE 64
-#define X86_32_B_SIZE 32
-
-#define X86_HVM_NR_SPECIAL_PAGES 8
-#define X86_HVM_END_SPECIAL_REGION 0xff000u
-#define XG_MAX_MODULES 2
-
-/* --- typedefs and structs ---------------------------------------- */
-
-typedef uint64_t xen_vaddr_t;
-typedef uint64_t xen_paddr_t;
-
-#define PRIpfn PRI_xen_pfn
-
-struct xc_dom_seg {
- xen_vaddr_t vstart;
- xen_vaddr_t vend;
- xen_pfn_t pfn;
- xen_pfn_t pages;
-};
-
-struct xc_hvm_firmware_module {
- uint8_t *data;
- uint32_t length;
- uint64_t guest_addr_out;
-};
-
-struct xc_dom_mem {
- struct xc_dom_mem *next;
- void *ptr;
- enum {
- XC_DOM_MEM_TYPE_MALLOC_INTERNAL,
- XC_DOM_MEM_TYPE_MALLOC_EXTERNAL,
- XC_DOM_MEM_TYPE_MMAP,
- } type;
- size_t len;
- unsigned char memory[0];
-};
-
-struct xc_dom_phys {
- struct xc_dom_phys *next;
- void *ptr;
- xen_pfn_t first;
- xen_pfn_t count;
-};
-
-struct xc_dom_module {
- void *blob;
- size_t size;
- void *cmdline;
- /* If seg.vstart is non zero then the module will be loaded at that
- * address, otherwise it will automatically placed.
- *
- * If automatic placement is used and the module is gzip
- * compressed then it will be decompressed as it is loaded. If the
- * module has been explicitly placed then it is loaded as is
- * otherwise decompressing risks undoing the manual placement.
- */
- struct xc_dom_seg seg;
-};
-
-struct xc_dom_image {
- /* files */
- void *kernel_blob;
- size_t kernel_size;
- unsigned int num_modules;
- struct xc_dom_module modules[XG_MAX_MODULES];
- void *devicetree_blob;
- size_t devicetree_size;
-
- size_t max_kernel_size;
- size_t max_module_size;
- size_t max_devicetree_size;
-
- /* arguments and parameters */
- char *cmdline;
- size_t cmdline_size;
- uint32_t f_requested[XENFEAT_NR_SUBMAPS];
-
- /* info from (elf) kernel image */
- struct elf_dom_parms *parms;
- char *guest_type;
-
- /* memory layout */
- struct xc_dom_seg kernel_seg;
- struct xc_dom_seg p2m_seg;
- struct xc_dom_seg pgtables_seg;
- struct xc_dom_seg devicetree_seg;
- struct xc_dom_seg start_info_seg;
- xen_pfn_t start_info_pfn;
- xen_pfn_t console_pfn;
- xen_pfn_t xenstore_pfn;
- xen_pfn_t shared_info_pfn;
- xen_pfn_t bootstack_pfn;
- xen_pfn_t pfn_alloc_end;
- xen_vaddr_t virt_alloc_end;
- xen_vaddr_t bsd_symtab_start;
-
- /*
- * initrd parameters as specified in start_info page
- * Depending on capabilities of the booted kernel this may be a virtual
- * address or a pfn. Type is neutral and large enough to hold a virtual
- * address of a 64 bit kernel even with 32 bit toolstack.
- */
- uint64_t initrd_start;
- uint64_t initrd_len;
-
- unsigned int alloc_bootstack;
- xen_vaddr_t virt_pgtab_end;
-
- /* other state info */
- uint32_t f_active[XENFEAT_NR_SUBMAPS];
-
- /*
- * pv_p2m is specific to x86 PV guests, and maps GFNs to MFNs. It is
- * eventually copied into guest context.
- */
- xen_pfn_t *pv_p2m;
-
- /* physical memory
- *
- * An x86 PV guest has one or more blocks of physical RAM,
- * consisting of total_pages starting at 0. The start address and
- * size of each block is controlled by vNUMA structures.
- *
- * An ARM guest has GUEST_RAM_BANKS regions of RAM, with
- * rambank_size[i] pages in each. The lowest RAM address
- * (corresponding to the base of the p2m arrays above) is stored
- * in rambase_pfn.
- */
- xen_pfn_t rambase_pfn;
- xen_pfn_t total_pages;
- xen_pfn_t p2m_size; /* number of pfns covered by p2m */
- struct xc_dom_phys *phys_pages;
-#if defined (__arm__) || defined(__aarch64__)
- xen_pfn_t rambank_size[GUEST_RAM_BANKS];
-#endif
-
- /* malloc memory pool */
- struct xc_dom_mem *memblocks;
-
- /* memory footprint stats */
- size_t alloc_malloc;
- size_t alloc_mem_map;
- size_t alloc_file_map;
- size_t alloc_domU_map;
-
- /* misc xen domain config stuff */
- unsigned long flags;
- unsigned int console_evtchn;
- unsigned int xenstore_evtchn;
- uint32_t console_domid;
- uint32_t xenstore_domid;
- xen_pfn_t shared_info_mfn;
-
- xc_interface *xch;
- uint32_t guest_domid;
- int claim_enabled; /* 0 by default, 1 enables it */
-
- int xen_version;
- xen_capabilities_info_t xen_caps;
-
- /* kernel loader, arch hooks */
- struct xc_dom_loader *kernel_loader;
- void *private_loader;
-
- /* vNUMA information */
- xen_vmemrange_t *vmemranges;
- unsigned int nr_vmemranges;
- unsigned int *vnode_to_pnode;
- unsigned int nr_vnodes;
-
- /* domain type/architecture specific data */
- void *arch_private;
-
- /* kernel loader */
- struct xc_dom_arch *arch_hooks;
- /* allocate up to pfn_alloc_end */
- int (*allocate) (struct xc_dom_image * dom);
-
- /* Container type (HVM or PV). */
- enum {
- XC_DOM_PV_CONTAINER,
- XC_DOM_HVM_CONTAINER,
- } container_type;
-
- /* HVM specific fields. */
- xen_pfn_t target_pages;
- xen_paddr_t mmio_start;
- xen_paddr_t mmio_size;
- xen_paddr_t lowmem_end;
- xen_paddr_t highmem_end;
- xen_pfn_t vga_hole_size;
-
- /* If unset disables the setup of the IOREQ pages. */
- bool device_model;
-
- /* BIOS/Firmware passed to HVMLOADER */
- struct xc_hvm_firmware_module system_firmware_module;
-
- /* Extra ACPI tables */
-#define MAX_ACPI_MODULES 4
- struct xc_hvm_firmware_module acpi_modules[MAX_ACPI_MODULES];
-
- /* Extra SMBIOS structures passed to HVMLOADER */
- struct xc_hvm_firmware_module smbios_module;
-
-#if defined(__i386__) || defined(__x86_64__)
- struct e820entry *e820;
- unsigned int e820_entries;
-#endif
-
- xen_pfn_t vuart_gfn;
-
- /* Number of vCPUs */
- unsigned int max_vcpus;
-};
-
-/* --- arch specific hooks ----------------------------------------- */
-
-struct xc_dom_arch {
- int (*alloc_magic_pages) (struct xc_dom_image * dom);
-
- /* pagetable setup - x86 PV only */
- int (*alloc_pgtables) (struct xc_dom_image * dom);
- int (*alloc_p2m_list) (struct xc_dom_image * dom);
- int (*setup_pgtables) (struct xc_dom_image * dom);
-
- /* arch-specific data structs setup */
- /* in Mini-OS environment start_info might be a macro, avoid collision. */
-#undef start_info
- int (*start_info) (struct xc_dom_image * dom);
- int (*shared_info) (struct xc_dom_image * dom, void *shared_info);
- int (*vcpu) (struct xc_dom_image * dom);
- int (*bootearly) (struct xc_dom_image * dom);
- int (*bootlate) (struct xc_dom_image * dom);
-
- /* arch-specific memory initialization. */
- int (*meminit) (struct xc_dom_image * dom);
-
- char *guest_type;
- char *native_protocol;
- int page_shift;
- int sizeof_pfn;
- int p2m_base_supported;
- int arch_private_size;
-
- struct xc_dom_arch *next;
-};
-void xc_dom_register_arch_hooks(struct xc_dom_arch *hooks);
-
-#define XC_DOM_PAGE_SHIFT(dom) ((dom)->arch_hooks->page_shift)
-#define XC_DOM_PAGE_SIZE(dom) (1LL << (dom)->arch_hooks->page_shift)
-
-/* --- main functions ---------------------------------------------- */
-
-struct xc_dom_image *xc_dom_allocate(xc_interface *xch,
- const char *cmdline, const char *features);
-void xc_dom_release_phys(struct xc_dom_image *dom);
-void xc_dom_release(struct xc_dom_image *dom);
-int xc_dom_rambase_init(struct xc_dom_image *dom, uint64_t rambase);
-int xc_dom_mem_init(struct xc_dom_image *dom, unsigned int mem_mb);
-
-/* Set this larger if you have enormous modules/kernels. Note that
- * you should trust all kernels not to be maliciously large (e.g. to
- * exhaust all dom0 memory) if you do this (see CVE-2012-4544 /
- * XSA-25). You can also set the default independently for
- * modules/kernels in xc_dom_allocate() or call
- * xc_dom_{kernel,module}_max_size.
- */
-#ifndef XC_DOM_DECOMPRESS_MAX
-#define XC_DOM_DECOMPRESS_MAX (1024*1024*1024) /* 1GB */
-#endif
-
-int xc_dom_kernel_check_size(struct xc_dom_image *dom, size_t sz);
-int xc_dom_kernel_max_size(struct xc_dom_image *dom, size_t sz);
-
-int xc_dom_module_max_size(struct xc_dom_image *dom, size_t sz);
-
-int xc_dom_devicetree_max_size(struct xc_dom_image *dom, size_t sz);
-
-size_t xc_dom_check_gzip(xc_interface *xch,
- void *blob, size_t ziplen);
-int xc_dom_do_gunzip(xc_interface *xch,
- void *src, size_t srclen, void *dst, size_t dstlen);
-int xc_dom_try_gunzip(struct xc_dom_image *dom, void **blob, size_t * size);
-
-int xc_dom_kernel_file(struct xc_dom_image *dom, const char *filename);
-int xc_dom_module_file(struct xc_dom_image *dom, const char *filename,
- const char *cmdline);
-int xc_dom_kernel_mem(struct xc_dom_image *dom, const void *mem,
- size_t memsize);
-int xc_dom_module_mem(struct xc_dom_image *dom, const void *mem,
- size_t memsize, const char *cmdline);
-int xc_dom_devicetree_file(struct xc_dom_image *dom, const char *filename);
-int xc_dom_devicetree_mem(struct xc_dom_image *dom, const void *mem,
- size_t memsize);
-
-int xc_dom_parse_image(struct xc_dom_image *dom);
-int xc_dom_set_arch_hooks(struct xc_dom_image *dom);
-int xc_dom_build_image(struct xc_dom_image *dom);
-
-int xc_dom_boot_xen_init(struct xc_dom_image *dom, xc_interface *xch,
- uint32_t domid);
-int xc_dom_boot_mem_init(struct xc_dom_image *dom);
-void *xc_dom_boot_domU_map(struct xc_dom_image *dom, xen_pfn_t pfn,
- xen_pfn_t count);
-int xc_dom_boot_image(struct xc_dom_image *dom);
-int xc_dom_compat_check(struct xc_dom_image *dom);
-int xc_dom_gnttab_init(struct xc_dom_image *dom);
-int xc_dom_gnttab_seed(xc_interface *xch, uint32_t guest_domid,
- bool is_hvm,
- xen_pfn_t console_gfn,
- xen_pfn_t xenstore_gfn,
- uint32_t console_domid,
- uint32_t xenstore_domid);
-bool xc_dom_translated(const struct xc_dom_image *dom);
-
-/* --- debugging bits ---------------------------------------------- */
-
-int xc_dom_loginit(xc_interface *xch);
-
-void xc_dom_printf(xc_interface *xch, const char *fmt, ...)
- __attribute__ ((format(printf, 2, 3)));
-void xc_dom_panic_func(xc_interface *xch,
- const char *file, int line, xc_error_code err,
- const char *fmt, ...)
- __attribute__ ((format(printf, 5, 6)));
-
-#define xc_dom_panic(xch, err, fmt, args...) \
- xc_dom_panic_func(xch, __FILE__, __LINE__, err, fmt, ## args)
-#define xc_dom_trace(mark) \
- xc_dom_printf("%s:%d: trace %s\n", __FILE__, __LINE__, mark)
-
-void xc_dom_log_memory_footprint(struct xc_dom_image *dom);
-
-/* --- simple memory pool ------------------------------------------ */
-
-void *xc_dom_malloc(struct xc_dom_image *dom, size_t size);
-int xc_dom_register_external(struct xc_dom_image *dom, void *ptr, size_t size);
-void *xc_dom_malloc_page_aligned(struct xc_dom_image *dom, size_t size);
-void *xc_dom_malloc_filemap(struct xc_dom_image *dom,
- const char *filename, size_t * size,
- const size_t max_size);
-char *xc_dom_strdup(struct xc_dom_image *dom, const char *str);
-
-/* --- alloc memory pool ------------------------------------------- */
-
-xen_pfn_t xc_dom_alloc_page(struct xc_dom_image *dom, char *name);
-int xc_dom_alloc_segment(struct xc_dom_image *dom,
- struct xc_dom_seg *seg, char *name,
- xen_vaddr_t start, xen_vaddr_t size);
-
-/* --- misc bits --------------------------------------------------- */
-
-void *xc_dom_pfn_to_ptr(struct xc_dom_image *dom, xen_pfn_t first,
- xen_pfn_t count);
-void *xc_dom_pfn_to_ptr_retcount(struct xc_dom_image *dom, xen_pfn_t first,
- xen_pfn_t count, xen_pfn_t *count_out);
-void xc_dom_unmap_one(struct xc_dom_image *dom, xen_pfn_t pfn);
-void xc_dom_unmap_all(struct xc_dom_image *dom);
-void *xc_dom_vaddr_to_ptr(struct xc_dom_image *dom,
- xen_vaddr_t vaddr, size_t *safe_region_out);
-uint64_t xc_dom_virt_base(struct xc_dom_image *dom);
-uint64_t xc_dom_virt_entry(struct xc_dom_image *dom);
-uint64_t xc_dom_virt_hypercall(struct xc_dom_image *dom);
-char *xc_dom_guest_os(struct xc_dom_image *dom);
-bool xc_dom_feature_get(struct xc_dom_image *dom, unsigned int nr);
-
-static inline void *xc_dom_seg_to_ptr_pages(struct xc_dom_image *dom,
- struct xc_dom_seg *seg,
- xen_pfn_t *pages_out)
-{
- void *retval;
-
- retval = xc_dom_pfn_to_ptr(dom, seg->pfn, seg->pages);
-
- *pages_out = retval ? seg->pages : 0;
- return retval;
-}
-
-static inline void *xc_dom_seg_to_ptr(struct xc_dom_image *dom,
- struct xc_dom_seg *seg)
-{
- xen_pfn_t dummy;
-
- return xc_dom_seg_to_ptr_pages(dom, seg, &dummy);
-}
-
-static inline xen_pfn_t xc_dom_p2m(struct xc_dom_image *dom, xen_pfn_t pfn)
-{
- if ( xc_dom_translated(dom) )
- return pfn;
-
- /* x86 PV only now. */
- if ( pfn >= dom->total_pages )
- return INVALID_MFN;
-
- return dom->pv_p2m[pfn];
-}
-
-/*
- * User not using xc_suspend_* / xc_await_suspent may not want to
- * include the full libxenevtchn API here.
- */
-struct xenevtchn_handle;
-
-/* For save's precopy_policy(). */
-struct precopy_stats
-{
- unsigned int iteration;
- unsigned int total_written;
- long dirty_count; /* -1 if unknown */
-};
-
-/*
- * A precopy_policy callback may not be running in the same address
- * space as libxc an so precopy_stats is passed by value.
- */
-typedef int (*precopy_policy_t)(struct precopy_stats, void *);
-
-/* callbacks provided by xc_domain_save */
-struct save_callbacks {
- /*
- * Called after expiration of checkpoint interval,
- * to suspend the guest.
- */
- int (*suspend)(void *data);
-
- /*
- * Called before and after every batch of page data sent during
- * the precopy phase of a live migration to ask the caller what
- * to do next based on the current state of the precopy migration.
- *
- * Should return one of the values listed below:
- */
-#define XGS_POLICY_ABORT (-1) /* Abandon the migration entirely
- * and tidy up. */
-#define XGS_POLICY_CONTINUE_PRECOPY 0 /* Remain in the precopy phase. */
-#define XGS_POLICY_STOP_AND_COPY 1 /* Immediately suspend and transmit the
- * remaining dirty pages. */
- precopy_policy_t precopy_policy;
-
- /*
- * Called after the guest's dirty pages have been
- * copied into an output buffer.
- * Callback function resumes the guest & the device model,
- * returns to xc_domain_save.
- * xc_domain_save then flushes the output buffer, while the
- * guest continues to run.
- */
- int (*postcopy)(void *data);
-
- /*
- * Called after the memory checkpoint has been flushed
- * out into the network. Typical actions performed in this
- * callback include:
- * (a) send the saved device model state (for HVM guests),
- * (b) wait for checkpoint ack
- * (c) release the network output buffer pertaining to the acked checkpoint.
- * (c) sleep for the checkpoint interval.
- *
- * returns:
- * 0: terminate checkpointing gracefully
- * 1: take another checkpoint
- */
- int (*checkpoint)(void *data);
-
- /*
- * Called after the checkpoint callback.
- *
- * returns:
- * 0: terminate checkpointing gracefully
- * 1: take another checkpoint
- */
- int (*wait_checkpoint)(void *data);
-
- /* Enable qemu-dm logging dirty pages to xen */
- int (*switch_qemu_logdirty)(uint32_t domid, unsigned enable, void *data); /* HVM only */
-
- /* to be provided as the last argument to each callback function */
- void *data;
-};
-
-/* Type of stream. Plain, or using a continuous replication protocol? */
-typedef enum {
- XC_STREAM_PLAIN,
- XC_STREAM_REMUS,
- XC_STREAM_COLO,
-} xc_stream_type_t;
-
-/**
- * This function will save a running domain.
- *
- * @param xch a handle to an open hypervisor interface
- * @param io_fd the file descriptor to save a domain to
- * @param dom the id of the domain
- * @param flags XCFLAGS_xxx
- * @param stream_type XC_STREAM_PLAIN if the far end of the stream
- * doesn't use checkpointing
- * @param recv_fd Only used for XC_STREAM_COLO. Contains backchannel from
- * the destination side.
- * @return 0 on success, -1 on failure
- */
-int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
- uint32_t flags, struct save_callbacks *callbacks,
- xc_stream_type_t stream_type, int recv_fd);
-
-/* callbacks provided by xc_domain_restore */
-struct restore_callbacks {
- /*
- * Called once the STATIC_DATA_END record has been received/inferred.
- *
- * For compatibility with older streams, provides a list of static data
- * expected to be found in the stream, which was missing. A higher level
- * toolstack is responsible for providing any necessary compatibiltiy.
- */
-#define XGR_SDD_MISSING_CPUID (1 << 0)
-#define XGR_SDD_MISSING_MSR (1 << 1)
- int (*static_data_done)(unsigned int missing, void *data);
-
- /* Called after a new checkpoint to suspend the guest. */
- int (*suspend)(void *data);
-
- /*
- * Called after the secondary vm is ready to resume.
- * Callback function resumes the guest & the device model,
- * returns to xc_domain_restore.
- */
- int (*postcopy)(void *data);
-
- /*
- * A checkpoint record has been found in the stream.
- * returns:
- */
-#define XGR_CHECKPOINT_ERROR 0 /* Terminate processing */
-#define XGR_CHECKPOINT_SUCCESS 1 /* Continue reading more data from the stream */
-#define XGR_CHECKPOINT_FAILOVER 2 /* Failover and resume VM */
- int (*checkpoint)(void *data);
-
- /*
- * Called after the checkpoint callback.
- *
- * returns:
- * 0: terminate checkpointing gracefully
- * 1: take another checkpoint
- */
- int (*wait_checkpoint)(void *data);
-
- /*
- * callback to send store gfn and console gfn to xl
- * if we want to resume vm before xc_domain_save()
- * exits.
- */
- void (*restore_results)(xen_pfn_t store_gfn, xen_pfn_t console_gfn,
- void *data);
-
- /* to be provided as the last argument to each callback function */
- void *data;
-};
-
-/**
- * This function will restore a saved domain.
- *
- * Domain is restored in a suspended state ready to be unpaused.
- *
- * @param xch a handle to an open hypervisor interface
- * @param io_fd the file descriptor to restore a domain from
- * @param dom the id of the domain
- * @param store_evtchn the xenstore event channel for this domain to use
- * @param store_mfn filled with the gfn of the store page
- * @param store_domid the backend domain for xenstore
- * @param console_evtchn the console event channel for this domain to use
- * @param console_mfn filled with the gfn of the console page
- * @param console_domid the backend domain for xenconsole
- * @param stream_type XC_STREAM_PLAIN if the far end of the stream is using
- * checkpointing
- * @param callbacks non-NULL to receive a callback to restore toolstack
- * specific data
- * @param send_back_fd Only used for XC_STREAM_COLO. Contains backchannel to
- * the source side.
- * @return 0 on success, -1 on failure
- */
-int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
- unsigned int store_evtchn, unsigned long *store_mfn,
- uint32_t store_domid, unsigned int console_evtchn,
- unsigned long *console_mfn, uint32_t console_domid,
- xc_stream_type_t stream_type,
- struct restore_callbacks *callbacks, int send_back_fd);
-
-/**
- * This function will create a domain for a paravirtualized Linux
- * using file names pointing to kernel and ramdisk
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the id of the domain
- * @parm mem_mb memory size in megabytes
- * @parm image_name name of the kernel image file
- * @parm ramdisk_name name of the ramdisk image file
- * @parm cmdline command line string
- * @parm flags domain creation flags
- * @parm store_evtchn the store event channel for this domain to use
- * @parm store_mfn returned with the mfn of the store page
- * @parm console_evtchn the console event channel for this domain to use
- * @parm conole_mfn returned with the mfn of the console page
- * @return 0 on success, -1 on failure
- */
-int xc_linux_build(xc_interface *xch,
- uint32_t domid,
- unsigned int mem_mb,
- const char *image_name,
- const char *ramdisk_name,
- const char *cmdline,
- const char *features,
- unsigned long flags,
- unsigned int store_evtchn,
- unsigned long *store_mfn,
- unsigned int console_evtchn,
- unsigned long *console_mfn);
-
-/*
- * Sets *lockfd to -1.
- * Has deallocated everything even on error.
- */
-int xc_suspend_evtchn_release(xc_interface *xch,
- struct xenevtchn_handle *xce,
- uint32_t domid, int suspend_evtchn, int *lockfd);
-
-/**
- * This function eats the initial notification.
- * xce must not be used for anything else
- * See xc_suspend_evtchn_init_sane re lockfd.
- */
-int xc_suspend_evtchn_init_exclusive(xc_interface *xch,
- struct xenevtchn_handle *xce,
- uint32_t domid, int port, int *lockfd);
-
-/* xce must not be used for anything else */
-int xc_await_suspend(xc_interface *xch, struct xenevtchn_handle *xce,
- int suspend_evtchn);
-
-/**
- * The port will be signaled immediately after this call
- * The caller should check the domain status and look for the next event
- * On success, *lockfd will be set to >=0 and *lockfd must be preserved
- * and fed to xc_suspend_evtchn_release. (On error *lockfd is
- * undefined and xc_suspend_evtchn_release is not allowed.)
- */
-int xc_suspend_evtchn_init_sane(xc_interface *xch,
- struct xenevtchn_handle *xce,
- uint32_t domid, int port, int *lockfd);
-
-int xc_mark_page_online(xc_interface *xch, unsigned long start,
- unsigned long end, uint32_t *status);
-
-int xc_mark_page_offline(xc_interface *xch, unsigned long start,
- unsigned long end, uint32_t *status);
-
-int xc_query_page_offline_status(xc_interface *xch, unsigned long start,
- unsigned long end, uint32_t *status);
-
-int xc_exchange_page(xc_interface *xch, uint32_t domid, xen_pfn_t mfn);
-
-
-/**
- * Memory related information, such as PFN types, the P2M table,
- * the guest word width and the guest page table levels.
- */
-struct xc_domain_meminfo {
- unsigned int pt_levels;
- unsigned int guest_width;
- xen_pfn_t *pfn_type;
- xen_pfn_t *p2m_table;
- unsigned long p2m_size;
-};
-
-int xc_map_domain_meminfo(xc_interface *xch, uint32_t domid,
- struct xc_domain_meminfo *minfo);
-
-int xc_unmap_domain_meminfo(xc_interface *xch, struct xc_domain_meminfo *mem);
-
-/**
- * This function map m2p table
- * @parm xch a handle to an open hypervisor interface
- * @parm max_mfn the max pfn
- * @parm prot the flags to map, such as read/write etc
- * @parm mfn0 return the first mfn, can be NULL
- * @return mapped m2p table on success, NULL on failure
- */
-xen_pfn_t *xc_map_m2p(xc_interface *xch,
- unsigned long max_mfn,
- int prot,
- unsigned long *mfn0);
-#endif /* XENGUEST_H */
SRCS-y += core.c
include ../libs.mk
-
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenhypfs)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
+++ /dev/null
-/*
- * Copyright (c) 2019 SUSE Software Solutions Germany GmbH
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef XENHYPFS_H
-#define XENHYPFS_H
-
-#include <stdbool.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-/* Callers who don't care don't need to #include <xentoollog.h> */
-struct xentoollog_logger;
-
-typedef struct xenhypfs_handle xenhypfs_handle;
-
-enum xenhypfs_type {
- xenhypfs_type_dir,
- xenhypfs_type_blob,
- xenhypfs_type_string,
- xenhypfs_type_uint,
- xenhypfs_type_int,
- xenhypfs_type_bool,
-};
-
-enum xenhypfs_encoding {
- xenhypfs_enc_plain,
- xenhypfs_enc_gzip
-};
-
-struct xenhypfs_dirent {
- char *name;
- size_t size;
- unsigned short type;
- unsigned short encoding;
- unsigned int flags;
-#define XENHYPFS_FLAG_WRITABLE 0x00000001
-};
-
-xenhypfs_handle *xenhypfs_open(struct xentoollog_logger *logger,
- unsigned int open_flags);
-int xenhypfs_close(xenhypfs_handle *fshdl);
-
-/*
- * Return the raw contents of a Xen hypfs entry and its dirent containing
- * the size, type and encoding.
- * Returned buffer and dirent should be freed via free().
- */
-void *xenhypfs_read_raw(xenhypfs_handle *fshdl, const char *path,
- struct xenhypfs_dirent **dirent);
-
-/*
- * Return the contents of a Xen hypfs entry as a string.
- * Returned buffer should be freed via free().
- */
-char *xenhypfs_read(xenhypfs_handle *fshdl, const char *path);
-
-/*
- * Return the contents of a Xen hypfs directory in form of an array of
- * dirents.
- * Returned buffer should be freed via free().
- */
-struct xenhypfs_dirent *xenhypfs_readdir(xenhypfs_handle *fshdl,
- const char *path,
- unsigned int *num_entries);
-
-/*
- * Write a Xen hypfs entry with a value. The value is converted from a string
- * to the appropriate type.
- */
-int xenhypfs_write(xenhypfs_handle *fshdl, const char *path, const char *val);
-
-#endif /* XENHYPFS_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
PKG_CONFIG_LOCAL := $(PKG_CONFIG_DIR)/$(PKG_CONFIG)
LIBHEADER ?= $(LIB_FILE_NAME).h
-LIBHEADERS = $(foreach h, $(LIBHEADER), include/$(h))
-LIBHEADERSGLOB = $(foreach h, $(LIBHEADER), $(XEN_ROOT)/tools/include/$(h))
+LIBHEADERS = $(foreach h, $(LIBHEADER), $(XEN_INCLUDE)/$(h))
$(PKG_CONFIG_LOCAL): PKG_CONFIG_PREFIX = $(XEN_ROOT)
+$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_INCLUDE)
$(PKG_CONFIG_LOCAL): PKG_CONFIG_LIBDIR = $(CURDIR)
.PHONY: all
.PHONY: headers.chk
endif
-headers.chk: $(LIBHEADERSGLOB) $(AUTOINCS)
+headers.chk: $(AUTOINCS)
libxen$(LIBNAME).map:
echo 'VERS_$(MAJOR).$(MINOR) { global: *; };' >$@
-$(LIBHEADERSGLOB): $(LIBHEADERS)
- for i in $(realpath $(LIBHEADERS)); do ln -sf $$i $(XEN_ROOT)/tools/include; done
-
lib$(LIB_FILE_NAME).a: $(LIB_OBJS)
$(AR) rc $@ $^
rm -f lib$(LIB_FILE_NAME).so.$(MAJOR).$(MINOR) lib$(LIB_FILE_NAME).so.$(MAJOR)
rm -f headers.chk
rm -f $(PKG_CONFIG)
- rm -f $(LIBHEADERSGLOB)
rm -f _paths.h
.PHONY: distclean
TEST_PROG_OBJS += $(foreach t, $(LIBXL_TESTS_PROGS),test_$t.o) test_common.o
TEST_PROGS += $(foreach t, $(LIBXL_TESTS_PROGS),test_$t)
-AUTOINCS = _libxl_list.h _paths.h _libxl_save_msgs_callout.h _libxl_save_msgs_helper.h
+AUTOINCS = $(XEN_INCLUDE)/_libxl_list.h _paths.h _libxl_save_msgs_callout.h _libxl_save_msgs_helper.h
AUTOSRCS = _libxl_save_msgs_callout.c _libxl_save_msgs_helper.c
CLIENTS = testidl libxl-save-helper
include $(XEN_ROOT)/tools/libs/libs.mk
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(CURDIR)
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
-
LDUSELIBS-y += $(PTYFUNCS_LIBS)
LDUSELIBS-$(CONFIG_LIBNL) += $(LIBNL3_LIBS)
LDUSELIBS-$(CONFIG_Linux) += -luuid
$(SAVE_HELPER_OBJS): CFLAGS += $(CFLAGS_libxenctrl) $(CFLAGS_libxenevtchn) $(CFLAGS_libxenguest)
testidl.o: CFLAGS += $(CFLAGS_libxenctrl) $(CFLAGS_libxenlight)
-testidl.c: libxl_types.idl gentest.py include/libxl.h $(AUTOINCS)
+testidl.c: libxl_types.idl gentest.py $(XEN_INCLUDE)/libxl.h $(AUTOINCS)
$(PYTHON) gentest.py libxl_types.idl testidl.c.new
mv testidl.c.new testidl.c
$(PERL) $^
touch $@
-_%.api-for-check: include/%.h $(AUTOINCS)
- $(CC) $(CPPFLAGS) $(CFLAGS) $(CFLAGS_$*.o) -c -E $< $(APPEND_CFLAGS) \
+_libxl.api-for-check: $(XEN_INCLUDE)/libxl.h $(AUTOINCS)
+ $(CC) $(CPPFLAGS) $(CFLAGS) $(CFLAGS_libxl.o) -c -E $< $(APPEND_CFLAGS) \
-DLIBXL_EXTERNAL_CALLERS_ONLY=LIBXL_EXTERNAL_CALLERS_ONLY \
>$@.new
mv -f $@.new $@
-_libxl_list.h: $(XEN_INCLUDE)/xen-external/bsd-sys-queue-h-seddery $(XEN_INCLUDE)/xen-external/bsd-sys-queue.h
- $(PERL) $^ --prefix=libxl >$@.new
- $(call move-if-changed,$@.new,$@)
+$(XEN_INCLUDE)/_libxl_list.h: $(XEN_INCLUDE)/xen-external/bsd-sys-queue-h-seddery $(XEN_INCLUDE)/xen-external/bsd-sys-queue.h
+ $(PERL) $^ --prefix=libxl >$(notdir $@).new
+ $(call move-if-changed,$(notdir $@).new,$@)
_libxl_save_msgs_helper.c _libxl_save_msgs_callout.c \
_libxl_save_msgs_helper.h _libxl_save_msgs_callout.h: \
$(PERL) -w $< $@ >$@.new
$(call move-if-changed,$@.new,$@)
-include/libxl.h: _libxl_types.h _libxl_list.h
-include/libxl_json.h: _libxl_types_json.h
+$(XEN_INCLUDE)/libxl.h: $(XEN_INCLUDE)/_libxl_types.h $(XEN_INCLUDE)/_libxl_list.h
+$(XEN_INCLUDE)/libxl_json.h: $(XEN_INCLUDE)/_libxl_types_json.h
libxl_internal.h: _libxl_types_internal.h _libxl_types_private.h _libxl_types_internal_private.h _paths.h
libxl_internal_json.h: _libxl_types_internal_json.h
xl.h: _paths.h
-$(LIB_OBJS) $(PIC_OBJS) $(LIBXL_TEST_OBJS) $(TEST_PROG_OBJS) $(SAVE_HELPER_OBJS): include/libxl.h
+$(LIB_OBJS) $(PIC_OBJS) $(LIBXL_TEST_OBJS) $(TEST_PROG_OBJS) $(SAVE_HELPER_OBJS): $(XEN_INCLUDE)/libxl.h
$(LIB_OBJS) $(PIC_OBJS) $(LIBXL_TEST_OBJS): libxl_internal.h
_libxl_type%.h _libxl_type%_json.h _libxl_type%_private.h _libxl_type%.c: libxl_type%.idl gentypes.py idl.py
$(call move-if-changed,__libxl_type$(stem)_json.h,_libxl_type$(stem)_json.h)
$(call move-if-changed,__libxl_type$(stem).c,_libxl_type$(stem).c)
-include/_%.h: _%.h
- cp $< $@
+$(XEN_INCLUDE)/_%.h: _%.h
+ $(call move-if-changed,_$*.h,$(XEN_INCLUDE)/_$*.h)
libxenlight_test.so: $(PIC_OBJS) $(LIBXL_TEST_OBJS)
$(CC) $(LDFLAGS) -Wl,$(SONAME_LDFLAG) -Wl,libxenlight.so.$(MAJOR) $(SHLIB_LDFLAGS) -o $@ $^ $(LDUSELIBS) $(APPEND_LDFLAGS)
+++ /dev/null
-/*
- * Copyright (C) 2009 Citrix Ltd.
- * Author Vincent Hanquez <vincent.hanquez@eu.citrix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- */
-
-/*
- * libxl API compatibility
- *
- * From Xen 4.2 onwards the API of libxl will be maintained in a
- * stable manner. This means that it should be possible to write an
- * application against the API provided by libxl in Xen 4.2 and expect
- * that it will continue to compile against future versions of Xen
- * without source modification.
- *
- * In order to make such compatibility possible it is required that
- * application which want to be exposed to a particular API #define
- * LIBXL_API_VERSION before including libxl.h or any other libxl
- * header. The syntax of the LIBXL_API_VERSION is:
- * 0xVVSSEE
- * where ($(XEN_xxx) from xen/Makefile):
- * VV is the Xen major release number, $(XEN_VERSION)
- * SS is the Xen sub version number, $(XEN_SUBVERSION)
- * EE is the Xen extra version digit, first numeric part of
- * $(XEN_EXTRAVERSION) not including the leading "."
- * For example the first stable API version, supported by Xen 4.2.0,
- * is 0x040200.
- *
- * Lack of LIBXL_API_VERSION means "the latest" which will
- * change. Specifying an unknown LIBXL_API_VERSION will result in a
- * compile time error.
- *
- * Identical versions of the libxl API will represented by the version
- * containing the earliest instance of that API. e.g. if 4.2.0 and
- * 4.3.0 contain an identical libxl API then only LIBXL_API_VERSION
- * 0x040200 will be valid.
- *
- * We will try especially hard to avoid changing the API during a
- * stable series, i.e. it should be unusual for the last byte of
- * LIBXL_API_VERSION to be non-zero.
- *
- * In the event that a change is required which cannot be made
- * backwards compatible in this manner a #define of the form
- * LIBXL_HAVE_<interface> will always be added in order to make it
- * possible to write applications which build against any version of
- * libxl. Such changes are expected to be exceptional and used as a
- * last resort. The barrier for backporting such a change to a stable
- * branch will be very high.
- *
- * These guarantees apply only to stable releases of Xen. When an
- * incompatible change is made in the unstable tree then
- * LIBXL_API_VERSION will be bumped to the next expected stable
- * release number on the first such change only. Applications which
- * want to support building against Xen unstable are expected to track
- * API changes in that tree until it is released as a stable release.
- *
- * API compatibility will be maintained for all versions of Xen using
- * the same $(XEN_VERSION) (e.g. throughout a major release).
- */
-
-/* LIBXL_HAVE_PHYSINFO_CAP_PV
- *
- * If this is defined, libxl_physinfo has a "cap_pv" field.
- */
-#define LIBXL_HAVE_PHYSINFO_CAP_PV 1
-
-/* LIBXL_HAVE_CONSOLE_NOTIFY_FD
- *
- * If this is defined, libxl_console_exec and
- * libxl_primary_console_exe take a notify_fd parameter. That
- * parameter will be used to notify the caller that the console is connected.
- */
-#define LIBXL_HAVE_CONSOLE_NOTIFY_FD 1
-
-/* LIBXL_HAVE_CONST_COPY_AND_LENGTH_FUNCTIONS
- *
- * If this is defined, the copy functions have constified src parameter and the
- * length functions accept constified parameter.
- */
-#define LIBXL_HAVE_CONST_COPY_AND_LENGTH_FUNCTIONS 1
-
-/* LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONST_B_INFO
- *
- * If this is defined, libxl_domain_need_memory no longer modifies
- * the b_info paseed in.
- */
-#define LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONST_B_INFO 1
-
-/* LIBXL_HAVE_VNUMA
- *
- * If this is defined the type libxl_vnode_info exists, and a
- * field 'vnuma_nodes' is present in libxl_domain_build_info.
- */
-#define LIBXL_HAVE_VNUMA 1
-
-/* LIBXL_HAVE_USERDATA_UNLINK
- *
- * If it is defined, libxl has a library function called
- * libxl_userdata_unlink.
- */
-#define LIBXL_HAVE_USERDATA_UNLINK 1
-
-/* LIBXL_HAVE_CPUPOOL_QUALIFIER_TO_CPUPOOLID
- *
- * If this is defined, libxl has a library function called
- * libxl_cpupool_qualifier_to_cpupoolid, which takes in a CPU pool
- * qualifier in the form of number or string, then returns the ID of
- * that CPU pool.
- */
-#define LIBXL_HAVE_CPUPOOL_QUALIFIER_TO_CPUPOOLID 1
-
-/* LIBXL_HAVE_CPUPOOL_ADD_REM_CPUMAP
- *
- * If this is defined, libxl has two library functions called
- * libxl_cpupool_cpuadd_cpumap and libxl_cpupool_cpuremove_cpumap,
- * which allow to add to or remove from a cpupool all the cpus
- * specified in a bitmap.
- */
-#define LIBXL_HAVE_CPUPOOL_ADD_REM_CPUMAP 1
-
-/*
- *
- * LIBXL_HAVE_BITMAP_AND_OR
- *
- * If this is defined, libxl has two library functions, libxl_bitmap_and
- * and libxl_bitmap_or to compute the logical and and or of two bitmaps
- */
-#define LIBXL_HAVE_BITMAP_AND_OR 1
-
-/*
- * LIBXL_HAVE_FIRMWARE_PASSTHROUGH indicates the feature for
- * passing in SMBIOS and ACPI firmware to HVM guests is present
- * in the library.
- */
-#define LIBXL_HAVE_FIRMWARE_PASSTHROUGH 1
-
-/*
- * LIBXL_HAVE_DOMAIN_NODEAFFINITY indicates that a 'nodemap' field
- * (of libxl_bitmap type) is present in libxl_domain_build_info,
- * containing the node-affinity for the domain.
- */
-#define LIBXL_HAVE_DOMAIN_NODEAFFINITY 1
-
-/*
- * LIBXL_HAVE_PVUSB indicates functions for plugging in USB devices
- * through pvusb -- both hotplug and at domain creation time..
- */
-#define LIBXL_HAVE_PVUSB 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_HVM_VENDOR_DEVICE indicates that the
- * libxl_vendor_device field is present in the hvm sections of
- * libxl_domain_build_info. This field tells libxl which
- * flavour of xen-pvdevice to enable in QEMU.
- */
-#define LIBXL_HAVE_BUILDINFO_HVM_VENDOR_DEVICE 1
-
-/*
- * The libxl_domain_build_info has the event_channels field.
- */
-#define LIBXL_HAVE_BUILDINFO_EVENT_CHANNELS 1
-
-/*
- * libxl_domain_build_info has the u.hvm.ms_vm_genid field.
- */
-#define LIBXL_HAVE_BUILDINFO_HVM_MS_VM_GENID 1
-
-/*
- * LIBXL_HAVE_VCPUINFO_SOFT_AFFINITY indicates that a 'cpumap_soft'
- * field (of libxl_bitmap type) is present in libxl_vcpuinfo,
- * containing the soft affinity of a vcpu.
- */
-#define LIBXL_HAVE_VCPUINFO_SOFT_AFFINITY 1
-
-/*
- * LIBXL_HAVE_SET_VCPUAFFINITY_FORCE indicates that the
- * libxl_set_vcpuaffinity_force() library call is available.
- */
-#define LIBXL_HAVE_SET_VCPUAFFINITY_FORCE 1
-
-/*
- * LIBXL_HAVE_DEVICE_DISK_DIRECT_IO_SAFE indicates that a
- * 'direct_io_safe' field (of boolean type) is present in
- * libxl_device_disk.
- */
-#define LIBXL_HAVE_DEVICE_DISK_DIRECT_IO_SAFE 1
-
-/*
- * The libxl_device_disk has the discard_enable field.
- */
-#define LIBXL_HAVE_LIBXL_DEVICE_DISK_DISCARD_ENABLE 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_IOMEM_START_GFN indicates that it is possible
- * to specify the start guest frame number used to map a range of I/O
- * memory machine frame numbers via the 'gfn' field (of type uint64)
- * of the 'iomem' structure. An array of iomem structures is embedded
- * in libxl_domain_build_info and used to map the indicated memory
- * ranges during domain build.
- */
-#define LIBXL_HAVE_BUILDINFO_IOMEM_START_GFN 1
-
-/*
- * LIBXL_HAVE_SCHED_RTDS indicates that the RTDS real time scheduler
- * is available. A 'budget' field added in libxl_domain_sched_params.
- */
-#define LIBXL_HAVE_SCHED_RTDS 1
-
-/*
- * LIBXL_HAVE_SCHED_NULL indicates that the 'null' static scheduler
- * is available.
- */
-#define LIBXL_HAVE_SCHED_NULL 1
-
-/*
- * libxl_domain_build_info has u.hvm.viridian_enable and _disable bitmaps
- * of the specified width.
- */
-#define LIBXL_HAVE_BUILDINFO_HVM_VIRIDIAN_ENABLE_DISABLE 1
-#define LIBXL_BUILDINFO_HVM_VIRIDIAN_ENABLE_DISABLE_WIDTH 64
-
-/*
- * libxl_domain_build_info has the u.hvm.mmio_hole_memkb field.
- */
-#define LIBXL_HAVE_BUILDINFO_HVM_MMIO_HOLE_MEMKB 1
-
-/*
- * libxl_domain_info returns ERROR_DOMAIN_NOTFOUND if the domain
- * is not present, instead of ERROR_INVAL.
- */
-#define LIBXL_HAVE_ERROR_DOMAIN_NOTFOUND 1
-
-/*
- * libxl_domain_build_info has device_tree and libxl_device_dtdev
- * exists. This mean Device Tree passthrough is supported for ARM
- */
-#define LIBXL_HAVE_DEVICETREE_PASSTHROUGH 1
-
-/*
- * libxl_domain_build_info has device_model_user to specify the user to
- * run the device model with. See docs/misc/qemu-deprivilege.txt.
- */
-#define LIBXL_HAVE_DEVICE_MODEL_USER 1
-
-/*
- * libxl_vcpu_sched_params is used to store per-vcpu params.
- */
-#define LIBXL_HAVE_VCPU_SCHED_PARAMS 1
-
-/*
- * LIBXL_HAVE_SCHED_RTDS_VCPU_PARAMS indicates RTDS scheduler
- * now supports per-vcpu settings.
- */
-#define LIBXL_HAVE_SCHED_RTDS_VCPU_PARAMS 1
-
-/*
- * LIBXL_HAVE_SCHED_RTDS_VCPU_EXTRA indicates RTDS scheduler
- * now supports per-vcpu extratime settings.
- */
-#define LIBXL_HAVE_SCHED_RTDS_VCPU_EXTRA 1
-
-/*
- * libxl_domain_build_info has the arm.gic_version field.
- */
-#define LIBXL_HAVE_BUILDINFO_ARM_GIC_VERSION 1
-
-/*
- * libxl_domain_build_info has the arch_arm.tee field.
- */
-#define LIBXL_HAVE_BUILDINFO_ARCH_ARM_TEE 1
-
-/*
- * LIBXL_HAVE_SOFT_RESET indicates that libxl supports performing
- * 'soft reset' for domains and there is 'soft_reset' shutdown reason
- * in enum libxl_shutdown_reason.
- */
-#define LIBXL_HAVE_SOFT_RESET 1
-
-/*
- * LIBXL_HAVE_APIC_ASSIST indicates that the 'apic_assist' value
- * is present in the viridian enlightenment enumeration.
- */
-#define LIBXL_HAVE_APIC_ASSIST 1
-
-/*
- * LIBXL_HAVE_BUILD_ID means that libxl_version_info has the extra
- * field for the hypervisor build_id.
- */
-#define LIBXL_HAVE_BUILD_ID 1
-
-/*
- * LIBXL_HAVE_QEMU_MONITOR_COMMAND indiactes the availability of the
- * libxl_qemu_monitor_command() function.
- */
-#define LIBXL_HAVE_QEMU_MONITOR_COMMAND 1
-
-/*
- * LIBXL_HAVE_SCHED_CREDIT2_PARAMS indicates the existance of a
- * libxl_sched_credit2_params structure, containing Credit2 scheduler
- * wide parameters (i.e., the ratelimiting value).
- */
-#define LIBXL_HAVE_SCHED_CREDIT2_PARAMS 1
-
-/*
- * LIBXL_HAVE_SCHED_CREDIT_MIGR_DELAY indicates that there is a field
- * in libxl_sched_credit_params called vcpu_migr_delay_us which controls
- * the resistance of the vCPUs of the cpupool to migrations among pCPUs.
- */
-#define LIBXL_HAVE_SCHED_CREDIT_MIGR_DELAY
-
-/*
- * LIBXL_HAVE_VIRIDIAN_CRASH_CTL indicates that the 'crash_ctl' value
- * is present in the viridian enlightenment enumeration.
- */
-#define LIBXL_HAVE_VIRIDIAN_CRASH_CTL 1
-
-/*
- * LIBXL_HAVE_VIRIDIAN_SYNIC indicates that the 'synic' value
- * is present in the viridian enlightenment enumeration.
- */
-#define LIBXL_HAVE_VIRIDIAN_SYNIC 1
-
-/*
- * LIBXL_HAVE_VIRIDIAN_STIMER indicates that the 'stimer' value
- * is present in the viridian enlightenment enumeration.
- */
-#define LIBXL_HAVE_VIRIDIAN_STIMER 1
-
-/*
- * LIBXL_HAVE_VIRIDIAN_HCALL_IPI indicates that the 'hcall_ipi' value
- * is present in the viridian enlightenment enumeration.
- */
-#define LIBXL_HAVE_VIRIDIAN_HCALL_IPI 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE indicates that
- * libxl_domain_build_info has the u.hvm.acpi_laptop_slate field.
- */
-#define LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE 1
-
-/*
- * LIBXL_HAVE_P9S indicates that the p9 field in IDL has been changed to p9s
- */
-#define LIBXL_HAVE_P9S 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_ARM_VUART indicates that the toolstack supports virtual UART
- * for ARM.
- */
-#define LIBXL_HAVE_BUILDINFO_ARM_VUART 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_GRANT_LIMITS indicates that libxl_domain_build_info
- * has the max_grant_frames and max_maptrack_frames fields.
- */
-#define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
-
-#define LIBXL_MAX_GRANT_DEFAULT (~(uint32_t)0)
-#define LIBXL_MAX_GRANT_FRAMES_DEFAULT 32 /* deprecated */
-#define LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT 1024 /* deprecated */
-/*
- * LIBXL_HAVE_BUILDINFO_GRANT_DEFAULT indicates that the default
- * values of max_grant_frames and max_maptrack_frames fields in
- * libxl_domain_build_info are the special sentinel value
- * LIBXL_MAX_GRANT_DEFAULT rather than the fixed values above.
- * This means to use the hypervisor's default.
- */
-#define LIBXL_HAVE_BUILDINFO_GRANT_DEFAULT 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_* indicates that libxl_domain_build_info has
- * the field represented by the '*'. The original position of those
- * fields is:
- * - u.hvm.timer_mode
- * - u.hvm.apic
- * - u.hvm.nested_hvm
- * - u.pv.bootloader
- * - u.pv.bootloader_args
- */
-#define LIBXL_HAVE_BUILDINFO_TIMER_MODE 1
-#define LIBXL_HAVE_BUILDINFO_APIC 1
-#define LIBXL_HAVE_BUILDINFO_NESTED_HVM 1
-#define LIBXL_HAVE_BUILDINFO_BOOTLOADER 1
-#define LIBXL_HAVE_BUILDINFO_BOOTLOADER_ARGS 1
-
-/*
- * LIBXL_HAVE_EXTENDED_VKB indicates that libxl_device_vkb has extended fields:
- * - unique_id;
- * - feature_disable_keyboard;
- * - feature_disable_pointer;
- * - feature_abs_pointer;
- * - feature_raw_pointer;
- * - feature_multi_touch;
- * - width;
- * - height;
- * - multi_touch_width;
- * - multi_touch_height;
- * - multi_touch_num_contacts.
- */
-#define LIBXL_HAVE_EXTENDED_VKB 1
-
-/*
- * LIBXL_HAVE_PHYSINFO_CAP_HAP_SHADOW indicates that libxl_physinfo has
- * cap_hap and cap_shadow fields reflecting the hardware and Xen availability
- * of Hardware Assisted, and Shadow paging support.
- */
-#define LIBXL_HAVE_PHYSINFO_CAP_HAP_SHADOW 1
-
-/*
- * LIBXL_HAVE_PHYSINFO_CAP_IOMMU_HAP_PT_SHARE indicates that libxl_physinfo
- * has a cap_iommu_hap_pt_share field that indicates whether the hardware
- * supports sharing the IOMMU and HAP page tables.
- */
-#define LIBXL_HAVE_PHYSINFO_CAP_IOMMU_HAP_PT_SHARE 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_IOMMU_MEMKB indicates thate libxl_domain_build_info
- * has an iommu_memkb field which should be set with the amount of memory
- * overhead needed by the domain for populating IOMMU page tables.
- */
-#define LIBXL_HAVE_BUILDINFO_IOMMU_MEMKB 1
-
-/*
- * LIBXL_HAVE_CREATEINFO_PASSTHROUGH indicates that
- * libxl_domain_create_info has a passthrough field (which is a
- * libxl_passthrough enumeration) that indicates whether device pass-
- * through is enabled for the domain and, if so, whether the IOMMU and
- * HAP page tables may be shared or not.
- */
-#define LIBXL_HAVE_CREATEINFO_PASSTHROUGH 1
-
-/*
- * LIBXL_HAVE_DISK_SAFE_REMOVE indicates that the
- * libxl_device_disk_safe_remove() function is defined.
- */
-#define LIBXL_HAVE_DISK_SAFE_REMOVE 1
-
-/*
- * libxl ABI compatibility
- *
- * The only guarantee which libxl makes regarding ABI compatibility
- * across releases is that the SONAME will always be bumped whenever
- * the ABI is changed in an incompatible way.
- *
- * This applies within stable branches as well as
- * development branches. It is possible that a new stable release of
- * Xen may require a rebuild of applications using the
- * library. However per the API compatibility gaurantees such a
- * rebuild should not normally require any source level changes.
- *
- * As with the API compatiblity the SONAME will only be bumped for the
- * first ABI incompatible change in a development branch.
- */
-
-/*
- * libxl memory management
- *
- * From the point of view of the application (ie, libxl's caller),
- * struct libxl_ctx* is threadsafe, and all returned allocated
- * structures are obtained from malloc(), and must be freed by the
- * caller either directly or by calling an appropriate free function
- * provided by libxl. Ie the application does not get automatic
- * assistance from libxl in managing these allocations.
- *
- * Specific details are in the header comments which should be found
- * in libxl.h or libxlutil.h, next to the relevant function
- * declarations.
- *
- * Internally, libxl has a garbage collection scheme which allows much libxl
- * code to allocate strings etc. for internal use without needing to
- * free them. These are called "temporary allocations".
- *
- * The pool for these temporary allocations, along with any other
- * thread-specific data which is private to libxl but shared between
- * libxl functions (such as the current xenstore transaction), is
- * stored in the "gc context" which is a special enhanced context
- * structure allocated automatically by convenience macros at every
- * entry to libxl.
- *
- * Every libxl function falls into one of these categories:
- *
- * 1. Public functions (declared in libxl.h, libxlutil.h), which may
- * be called by libxl applications. If a public function returns
- * any allocated object to its caller, that object must have come
- * from malloc.
- *
- * The definitions of public functions MUST use the gc context
- * initialisation macros (or do the equivalent work themselves).
- * These macros will ensure that all temporary allocations will be
- * automatically freed before the function returns to its caller.
- *
- * A public function may be called from within libxl; the call
- * context initialisation macros will make sure that the internal
- * caller's context is reused (eg, so that the same xenstore
- * transaction is used). But in-libxl callers of libxl public
- * functions should note that any libxl public function may cause
- * recursively reentry into libxl via the application's event
- * callback hook.
- *
- * Public functions have names like libxl_foobar.
- *
- * 2. Private functions, which may not be called by libxl
- * applications; they are not declared in libxl.h or libxlutil.h
- * and they may not be called other than by other libxl functions.
- *
- * Private functions should not use the gc context initialisation
- * macros.
- *
- * Private functions have names like libxl__foobar (NB, two underscores).
- * Also the declaration of such functions must be preceeded by the _hidden
- * macro.
- *
- * Allocations made by a libxl function fall into one of the following
- * categories (where "object" includes any memory allocation):
- *
- * (a) Objects which are not returned to the function's caller.
- * These should be allocated from the temporary pool.
- *
- * (b) Objects which are intended for return to the calling
- * application. This includes all allocated objects returned by
- * any public function.
- *
- * It may also include objects allocated by an internal function
- * specifically for eventual return by the function's external
- * callers, but this situation should be clearly documented in
- * comments.
- *
- * These should be allocated from malloc() et al. and comments
- * near the function declaration should explain the memory
- * ownership. If a simple free() by the application is not
- * sufficient, a suitable public freeing function should be
- * provided.
- *
- * (c) Internal objects whose size and/or lifetime dictate explicit
- * memory management within libxl. This includes objects which
- * will be embedded in opaque structures which will be returned to
- * the libxl caller (more generally, any internal object whose
- * lifetime exceeds the libxl entrypoint which creates it) and
- * objects which are so large or numerous that explicit memory
- * management is required.
- *
- * These should be allocated from malloc() et al., and freed
- * explicitly at the appropriate point. The situation should be
- * documented in comments.
- *
- * (d) Objects which are allocated by internal-only functions and
- * returned to the function's (therefore, internal) caller but are
- * strictly for internal use by other parts of libxl. These
- * should be allocated from the temporary pool.
- *
- * Where a function's primary purpose is to return such an object,
- * it should have a libxl__gc * as it's first argument.
- *
- * Note that there are two ways to change an allocation from this
- * category to the "public" category. Either the implementation
- * is kept internal and a wrapper function duplicates all memory
- * allocations so that they are suitable for return to external
- * callers or the implementation uses plain malloc() et al calls
- * and an internal wrapper adds the relevant pointers to the gc.
- * The latter method is preferred for obvious performance reasons.
- *
- * No temporary objects allocated from the pool may be explicitly freed.
- * Therefore public functions which initialize a libxl__gc MUST call
- * libxl__free_all() before returning.
- *
- * Memory allocation failures are not handled gracefully. If malloc
- * (or realloc) fails, libxl will cause the entire process to print
- * a message to stderr and exit with status 255.
- */
-/*
- * libxl types
- *
- * Most libxl types are defined by the libxl IDL (see
- * libxl_types.idl). The library provides a common set of methods for
- * initialising and freeing these types.
- *
- * IDL-generated libxl types should be used as follows: the user must
- * always call the "init" function before using a type, even if the
- * variable is simply being passed by reference as an out parameter
- * to a libxl function. The user must always calls "dispose" exactly
- * once afterwards, to clean up, regardless of whether operations on
- * this object succeeded or failed. See the xl code for examples.
- *
- * "init" and "dispose" are idempotent.
- *
- * void libxl_<type>_init(<type> *p):
- *
- * Initialises the members of "p" to all defaults. These may either
- * be special value which indicates to the library that it should
- * select an appropriate default when using this field or actual
- * default values.
- *
- * Some fields within a data type (e.g. unions) cannot be sensibly
- * initialised without further information. In these cases a
- * separate subfield initialisation function is provided (see
- * below).
- *
- * An instance which has been initialised using this method can
- * always be safely passed to the dispose function (see
- * below). This is true even if the data type contains fields which
- * require a separate call to a subfield initialisation function.
- *
- * This method is provided for any aggregate type which is used as
- * an input parameter.
- *
- * void libxl_<type>_init_<subfield>(<type> *p, subfield):
- *
- * Initialise those parts of "p" which are not initialised by the
- * main init function due to the unknown value of "subfield". Sets
- * p->subfield as well as initialising any fields to their default
- * values.
- *
- * p->subfield must not have been previously initialised.
- *
- * This method is provided for any aggregate type.
- *
- * void libxl_<type>_dispose(instance *p):
- *
- * Frees any dynamically allocated memory used by the members of
- * "p" but not the storage used by "p" itself (this allows for the
- * allocation of arrays of types and for the composition of types).
- *
- * char *libxl_<type>_to_json(instance *p)
- *
- * Generates a JSON object from "p" in the form of a NULL terminated
- * string.
- *
- * <type *> libxl_<type>_from_json(const char *json)
- * int libxl_<type>_from_json(const char *json)
- *
- * Parses "json" and returns:
- *
- * an int value, if <type> is enumeration type. The value is the enum value
- * representing the respective string in "json".
- *
- * an instance of <type>, if <type> is aggregate type. The returned
- * instance has its fields filled in by the parser according to "json".
- *
- * If the parsing fails, caller cannot rely on the value / instance
- * returned.
- */
-#ifndef LIBXL_H
-#define LIBXL_H
-
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdarg.h>
-#include <string.h>
-#include <errno.h>
-#include <netinet/in.h>
-#include <sys/wait.h> /* for pid_t */
-
-#include <xentoollog.h>
-
-typedef struct libxl__ctx libxl_ctx;
-
-#include <libxl_uuid.h>
-#include <_libxl_list.h>
-
-/* API compatibility. */
-#ifdef LIBXL_API_VERSION
-#if LIBXL_API_VERSION != 0x040200 && LIBXL_API_VERSION != 0x040300 && \
- LIBXL_API_VERSION != 0x040400 && LIBXL_API_VERSION != 0x040500 && \
- LIBXL_API_VERSION != 0x040700 && LIBXL_API_VERSION != 0x040800 && \
- LIBXL_API_VERSION != 0x041300 && LIBXL_API_VERSION != 0x041400
-#error Unknown LIBXL_API_VERSION
-#endif
-#endif
-
-/* LIBXL_HAVE_RETRIEVE_DOMAIN_CONFIGURATION
- *
- * If this is defined we have libxl_retrieve_domain_configuration which
- * returns the current configuration of a domain, which can be used to
- * rebuild a domain.
- */
-#define LIBXL_HAVE_RETRIEVE_DOMAIN_CONFIGURATION 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_VCPU_AFFINITY_ARRAYS
- *
- * If this is defined, then the libxl_domain_build_info structure will
- * contain two arrays of libxl_bitmap-s, with all the necessary information
- * to set the hard affinity (vcpu_hard_affinity) and the soft affinity
- * (vcpu_soft_affinity) of the VCPUs.
- *
- * Note that, if the vcpu_hard_affinity array is used, libxl will ignore
- * the content of the cpumap field of libxl_domain_build_info. That is to
- * say, if the array is allocated and used by the caller, it is it and
- * only it that determines the hard affinity of the domain's VCPUs.
- *
- * The number of libxl_bitmap-s in the arrays should be equal to the
- * maximum number of VCPUs of the domain. If there only are N elements in
- * an array, with N smaller the the maximum number of VCPUs, the hard or
- * soft affinity (depending on which array we are talking about) will be
- * set only for the first N VCPUs. The other VCPUs will just have affinity,
- * both hard and soft, with all the host PCPUs.
- * Each bitmap should be big enough to accommodate the maximum number of
- * PCPUs of the host.
- */
-#define LIBXL_HAVE_BUILDINFO_VCPU_AFFINITY_ARRAYS 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_VKB_DEVICE
- *
- * If this is defined, then the libxl_domain_build_info structure will
- * contain a boolean hvm.vkb_device which instructs libxl whether to include
- * a vkbd at build time or not.
- */
-#define LIBXL_HAVE_BUILDINFO_VKB_DEVICE 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST
- *
- * If this is defined, then the libxl_domain_build_info structure will
- * contain hvm.usbdevice_list, a libxl_string_list type that contains
- * a list of USB devices to specify on the qemu command-line.
- *
- * If it is set, callers may use either hvm.usbdevice or
- * hvm.usbdevice_list, but not both; if both are set, libxl will
- * throw an error.
- *
- * If this is not defined, callers can only use hvm.usbdevice. Note
- * that this means only one device can be added at domain build time.
- */
-#define LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_USBVERSION
- *
- * If this is defined, then the libxl_domain_build_info structure will
- * contain hvm.usbversion, a integer type that contains a USB
- * controller version to specify on the qemu upstream command-line.
- *
- * If it is set, callers may use hvm.usbversion to specify if the usb
- * controller is usb1, usb2 or usb3.
- *
- * If this is not defined, the hvm.usbversion field does not exist.
- */
-#define LIBXL_HAVE_BUILDINFO_USBVERSION 1
-
-/*
- * LIBXL_HAVE_DEVICE_BACKEND_DOMNAME
- *
- * If this is defined, libxl_device_* structures containing a backend_domid
- * field also contain a backend_domname field. If backend_domname is set, it is
- * resolved to a domain ID when the device is used and takes precedence over the
- * backend_domid field.
- *
- * If this is not defined, the backend_domname field does not exist.
- */
-#define LIBXL_HAVE_DEVICE_BACKEND_DOMNAME 1
-
-/*
- * LIBXL_HAVE_NONCONST_EVENT_OCCURS_EVENT_ARG
- *
- * This argument was erroneously "const" in the 4.2 release despite
- * the requirement for the callback to free the event.
- */
-#if LIBXL_API_VERSION != 0x040200
-#define LIBXL_HAVE_NONCONST_EVENT_OCCURS_EVENT_ARG 1
-#endif
-
-/*
- * LIBXL_HAVE_NONCONST_LIBXL_BASENAME_RETURN_VALUE
- *
- * The return value of libxl_basename is malloc'ed but the erroneously
- * marked as "const" in releases before 4.5.
- */
-#if !defined(LIBXL_API_VERSION) || LIBXL_API_VERSION >= 0x040500
-#define LIBXL_HAVE_NONCONST_LIBXL_BASENAME_RETURN_VALUE 1
-#endif
-
-/*
- * LIBXL_HAVE_PHYSINFO_OUTSTANDING_PAGES
- *
- * If this is defined, libxl_physinfo structure will contain an uint64 field
- * called outstanding_pages, containing the number of pages claimed but not
- * yet allocated for all domains.
- */
-#define LIBXL_HAVE_PHYSINFO_OUTSTANDING_PAGES 1
-
-/*
- * LIBXL_HAVE_PHYSINFO_MAX_POSSIBLE_MFN
- *
- * If this is defined, libxl_physinfo structure will contain an uint64 field
- * called max_possible_mfn, containing the highest possible mfn on this host,
- * possibly taking memory hotplug into account.
- */
-#define LIBXL_HAVE_PHYSINFO_MAX_POSSIBLE_MFN 1
-
-/*
- * LIBXL_HAVE_DOMINFO_OUTSTANDING_MEMKB 1
- *
- * If this is defined, libxl_dominfo will contain a MemKB type field called
- * outstanding_memkb, containing the amount of claimed but not yet allocated
- * memory for a specific domain.
- */
-#define LIBXL_HAVE_DOMINFO_OUTSTANDING_MEMKB 1
-
-/*
- * LIBXL_HAVE_DOMINFO_NEVER_STOP
- *
- * If this is defined, libxl_dominfo will contain a flag called never_stop
- * indicating that the specific domain should never be stopped by the
- * toolstack.
- */
-#define LIBXL_HAVE_DOMINFO_NEVER_STOP 1
-
-/*
- * LIBXL_HAVE_QXL
- *
- * If defined, then the libxl_vga_interface_type will contain another value:
- * "QXL". This value define if qxl vga is supported.
- *
- * If this is not defined, the qxl vga support is missed.
- */
-#define LIBXL_HAVE_QXL 1
-
-/*
- * LIBXL_HAVE_SPICE_VDAGENT
- *
- * If defined, then the libxl_spice_info structure will contain a boolean type:
- * vdagent and clipboard_sharing. These values define if Spice vdagent and
- * clipboard sharing are enabled.
- *
- * If this is not defined, the Spice vdagent support is ignored.
- */
-#define LIBXL_HAVE_SPICE_VDAGENT 1
-
-/*
- * LIBXL_HAVE_SPICE_USBREDIRECTION
- *
- * If defined, then the libxl_spice_info structure will contain an integer type
- * field: usbredirection. This value defines if Spice usbredirection is enabled
- * and with how much channels.
- *
- * If this is not defined, the Spice usbredirection support is ignored.
- */
-#define LIBXL_HAVE_SPICE_USBREDIREDIRECTION 1
-
-/*
- * LIBXL_HAVE_SPICE_IMAGECOMPRESSION
- *
- * If defined, then the libxl_spice_info structure will contain a string type
- * field: image_compression. This value defines what Spice image compression
- * is used.
- *
- * If this is not defined, the Spice image compression setting support is ignored.
- */
-#define LIBXL_HAVE_SPICE_IMAGECOMPRESSION 1
-
-/*
- * LIBXL_HAVE_SPICE_STREAMINGVIDEO
- *
- * If defined, then the libxl_spice_info structure will contain a string type
- * field: streaming_video. This value defines what Spice streaming video setting
- * is used.
- *
- * If this is not defined, the Spice streaming video setting support is ignored.
- */
-#define LIBXL_HAVE_SPICE_STREAMINGVIDEO 1
-
-/*
- * LIBXL_HAVE_HVM_HDTYPE
- *
- * If defined, then the u.hvm structure will contain a enum type
- * hdtype.
- */
-#define LIBXL_HAVE_HVM_HDTYPE 1
-
-/*
- * LIBXL_HAVE_DOMAIN_CREATE_RESTORE_PARAMS 1
- *
- * If this is defined, libxl_domain_create_restore()'s API has changed to
- * include a params structure.
- */
-#define LIBXL_HAVE_DOMAIN_CREATE_RESTORE_PARAMS 1
-
-/*
- * LIBXL_HAVE_DOMAIN_CREATE_RESTORE_SEND_BACK_FD 1
- *
- * If this is defined, libxl_domain_create_restore()'s API includes the
- * send_back_fd param. This is used only with COLO, for the libxl migration
- * back channel; other callers should pass -1.
- */
-#define LIBXL_HAVE_DOMAIN_CREATE_RESTORE_SEND_BACK_FD 1
-
-/*
- * LIBXL_HAVE_DRIVER_DOMAIN_CREATION 1
- *
- * If this is defined, libxl_domain_create_info contains a driver_domain
- * field that can be used to tell libxl that the domain that is going
- * to be created is a driver domain, so the necessary actions are taken.
- */
-#define LIBXL_HAVE_DRIVER_DOMAIN_CREATION 1
-
-/*
- * LIBXL_HAVE_SIGCHLD_SELECTIVE_REAP
- *
- * If this is defined:
- *
- * Firstly, the enum libxl_sigchld_owner (in libxl_event.h) has the
- * value libxl_sigchld_owner_libxl_always_selective_reap which may be
- * passed to libxl_childproc_setmode in hooks->chldmode.
- *
- * Secondly, the function libxl_childproc_sigchld_occurred exists.
- */
-#define LIBXL_HAVE_SIGCHLD_OWNER_SELECTIVE_REAP 1
-
-/*
- * LIBXL_HAVE_SIGCHLD_SHARING
- *
- * If this is defined, it is permissible for multiple libxl ctxs
- * to simultaneously "own" SIGCHLD. See "Subprocess handling"
- * in libxl_event.h.
- */
-#define LIBXL_HAVE_SIGCHLD_SHARING 1
-
-/*
- * LIBXL_HAVE_NO_SUSPEND_RESUME
- *
- * Is this is defined then the platform has no support for saving,
- * restoring or migrating a domain. In this case the related functions
- * should be expected to return failure. That is:
- * - libxl_domain_suspend
- * - libxl_domain_resume
- * - libxl_domain_remus_start
- */
-#if defined(__arm__) || defined(__aarch64__)
-#define LIBXL_HAVE_NO_SUSPEND_RESUME 1
-#endif
-
-/*
- * LIBXL_HAVE_DOMAIN_SUSPEND_ONLY
- *
- * If this is defined, function libxl_domains_suspend_only() is available.
- */
-
-#define LIBXL_HAVE_DOMAIN_SUSPEND_ONLY 1
-
-/*
- * LIBXL_HAVE_DEVICE_PCI_SEIZE
- *
- * If this is defined, then the libxl_device_pci struct will contain
- * the "seize" boolean field. If this field is set, libxl_pci_add will
- * check to see if the device is currently assigned to pciback, and if not,
- * it will attempt to do so (unbinding the device from the existing driver).
- */
-#define LIBXL_HAVE_DEVICE_PCI_SEIZE 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_KERNEL
- *
- * If this is defined, then the libxl_domain_build_info structure will
- * contain 'kernel', 'ramdisk', 'cmdline' fields. 'kernel' is a string
- * to indicate kernel image location, 'ramdisk' is a string to indicate
- * ramdisk location, 'cmdline' is a string to indicate the paramters which
- * would be appended to kernel image.
- *
- * Both PV guest and HVM guest can use these fields for direct kernel boot.
- * But for compatibility reason, u.pv.kernel, u.pv.ramdisk and u.pv.cmdline
- * still exist.
- */
-#define LIBXL_HAVE_BUILDINFO_KERNEL 1
-
-/*
- * LIBXL_HAVE_DEVICE_CHANNEL
- *
- * If this is defined, then the libxl_device_channel struct exists
- * and channels can be attached to a domain. Channels manifest as consoles
- * with names, see docs/misc/console.txt.
- */
-#define LIBXL_HAVE_DEVICE_CHANNEL 1
-
-/*
- * LIBXL_HAVE_AO_ABORT indicates the availability of libxl_ao_abort
- */
-#define LIBXL_HAVE_AO_ABORT 1
-
-/* Functions annotated with LIBXL_EXTERNAL_CALLERS_ONLY may not be
- * called from within libxl itself. Callers outside libxl, who
- * do not #include libxl_internal.h, are fine. */
-#ifndef LIBXL_EXTERNAL_CALLERS_ONLY
-#define LIBXL_EXTERNAL_CALLERS_ONLY /* disappears for callers outside libxl */
-#endif
-
-/*
- * LIBXL_HAVE_UUID_COPY_CTX_PARAM
- *
- * If this is defined, libxl_uuid_copy has changed to take a libxl_ctx
- * structure.
- */
-#define LIBXL_HAVE_UUID_COPY_CTX_PARAM 1
-
-/*
- * LIBXL_HAVE_SSID_LABEL
- *
- * If this is defined, then libxl IDL contains string of XSM security
- * label in all XSM related structures.
- *
- * If set this string takes precedence over the numeric field.
- */
-#define LIBXL_HAVE_SSID_LABEL 1
-
-/*
- * LIBXL_HAVE_CPUPOOL_NAME
- *
- * If this is defined, then libxl IDL contains string of CPU pool
- * name in all CPU pool related structures.
- *
- * If set this string takes precedence over the numeric field.
- */
-#define LIBXL_HAVE_CPUPOOL_NAME 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_SERIAL_LIST
- *
- * If this is defined, then the libxl_domain_build_info structure will
- * contain hvm.serial_list, a libxl_string_list type that contains
- * a list of serial ports to specify on the qemu command-line.
- *
- * If it is set, callers may use either hvm.serial or
- * hvm.serial_list, but not both; if both are set, libxl will
- * throw an error.
- *
- * If this is not defined, callers can only use hvm.serial. Note
- * that this means only one serial port can be added at domain build time.
- */
-#define LIBXL_HAVE_BUILDINFO_SERIAL_LIST 1
-
-/*
- * LIBXL_HAVE_ALTP2M
- * If this is defined, then libxl supports alternate p2m functionality.
- */
-#define LIBXL_HAVE_ALTP2M 1
-
-/*
- * LIBXL_HAVE_REMUS
- * If this is defined, then libxl supports remus.
- */
-#define LIBXL_HAVE_REMUS 1
-
-/*
- * LIBXL_HAVE_COLO_USERSPACE_PROXY
- * If this is defined, then libxl supports COLO userspace proxy.
- */
-#define LIBXL_HAVE_COLO_USERSPACE_PROXY 1
-
-typedef uint8_t libxl_mac[6];
-#define LIBXL_MAC_FMT "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx"
-#define LIBXL_MAC_FMTLEN ((2*6)+5) /* 6 hex bytes plus 5 colons */
-#define LIBXL_MAC_BYTES(mac) mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]
-void libxl_mac_copy(libxl_ctx *ctx, libxl_mac *dst, const libxl_mac *src);
-
-#if defined(__i386__) || defined(__x86_64__)
-/*
- * LIBXL_HAVE_PSR_CMT
- *
- * If this is defined, the Cache Monitoring Technology feature is supported.
- */
-#define LIBXL_HAVE_PSR_CMT 1
-
-/*
- * LIBXL_HAVE_PSR_MBM
- *
- * If this is defined, the Memory Bandwidth Monitoring feature is supported.
- */
-#define LIBXL_HAVE_PSR_MBM 1
-
-/*
- * LIBXL_HAVE_PSR_CAT
- *
- * If this is defined, the Cache Allocation Technology feature is supported.
- */
-#define LIBXL_HAVE_PSR_CAT 1
-
-/*
- * LIBXL_HAVE_PSR_CDP
- *
- * If this is defined, the Code and Data Prioritization feature is supported.
- */
-#define LIBXL_HAVE_PSR_CDP 1
-
-/*
- * LIBXL_HAVE_PSR_L2_CAT
- *
- * If this is defined, the L2 Cache Allocation Technology feature is supported.
- */
-#define LIBXL_HAVE_PSR_L2_CAT 1
-
-/*
- * LIBXL_HAVE_PSR_GENERIC
- *
- * If this is defined, the Memory Bandwidth Allocation feature is supported.
- * The following public functions are available:
- * libxl_psr_{set/get}_val
- * libxl_psr_get_hw_info
- * libxl_psr_hw_info_list_free
- */
-#define LIBXL_HAVE_PSR_GENERIC 1
-
-/*
- * LIBXL_HAVE_MCA_CAPS
- *
- * If this is defined, setting MCA capabilities for HVM domain is supported.
- */
-#define LIBXL_HAVE_MCA_CAPS 1
-#endif
-
-/*
- * LIBXL_HAVE_PCITOPOLOGY
- *
- * If this is defined, then interface to query hypervisor about PCI device
- * topology is available.
- */
-#define LIBXL_HAVE_PCITOPOLOGY 1
-
-/*
- * LIBXL_HAVE_SOCKET_BITMAP
- *
- * If this is defined, then libxl_socket_bitmap_alloc and
- * libxl_get_online_socketmap exist.
- */
-#define LIBXL_HAVE_SOCKET_BITMAP 1
-
-/*
- * LIBXL_HAVE_SRM_V2
- *
- * If this is defined, then the libxl_domain_create_restore() interface takes
- * a "stream_version" parameter and supports a value of 2.
- *
- * libxl_domain_suspend() will produce a v2 stream.
- */
-#define LIBXL_HAVE_SRM_V2 1
-
-/*
- * LIBXL_HAVE_SRM_V1
- *
- * In the case that LIBXL_HAVE_SRM_V2 is set, LIBXL_HAVE_SRM_V1
- * indicates that libxl_domain_create_restore() can handle a "stream_version"
- * parameter of 1, and convert the stream format automatically.
- */
-#define LIBXL_HAVE_SRM_V1 1
-
-/*
- * libxl_domain_build_info has the u.hvm.gfx_passthru_kind field and
- * the libxl_gfx_passthru_kind enumeration is defined.
-*/
-#define LIBXL_HAVE_GFX_PASSTHRU_KIND
-
-/*
- * LIBXL_HAVE_CHECKPOINTED_STREAM
- *
- * If this is defined, then libxl_checkpointed_stream exists.
- */
-#define LIBXL_HAVE_CHECKPOINTED_STREAM 1
-
-/*
- * LIBXL_HAVE_BUILDINFO_HVM_SYSTEM_FIRMWARE
- *
- * libxl_domain_build_info has u.hvm.system_firmware field which can be use
- * to provide a different firmware blob (like SeaBIOS or OVMF).
- */
-#define LIBXL_HAVE_BUILDINFO_HVM_SYSTEM_FIRMWARE
-
-/*
- * ERROR_REMUS_XXX error code only exists from Xen 4.5, Xen 4.6 and it
- * is changed to ERROR_CHECKPOINT_XXX in Xen 4.7
- */
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION >= 0x040500 \
- && LIBXL_API_VERSION < 0x040700
-#define ERROR_REMUS_DEVOPS_DOES_NOT_MATCH \
- ERROR_CHECKPOINT_DEVOPS_DOES_NOT_MATCH
-#define ERROR_REMUS_DEVICE_NOT_SUPPORTED \
- ERROR_CHECKPOINT_DEVICE_NOT_SUPPORTED
-#endif
-
-/*
- * LIBXL_HAVE_VGA_INTERFACE_TYPE_UNKNOWN
- *
- * In the case that LIBXL_HAVE_VGA_INTERFACE_TYPE_UNKNOWN is set the
- * libxl_vga_interface_type enumeration type contains a
- * LIBXL_VGA_INTERFACE_TYPE_UNKNOWN identifier. This is used to signal
- * that a libxl_vga_interface_type type has not been initialized yet.
- */
-#define LIBXL_HAVE_VGA_INTERFACE_TYPE_UNKNOWN 1
-
-/*
- * LIBXL_HAVE_BYTEARRAY_UUID
- *
- * If this is defined, the internal member of libxl_uuid is defined
- * as a 16 byte array that contains the UUID in big endian format.
- * Also, the same structure layout is used across all OSes.
- */
-#define LIBXL_HAVE_BYTEARRAY_UUID 1
-
-/*
- * LIBXL_HAVE_MEMKB_64BITS
- *
- * If this is defined libxl_set_memory_target(), libxl_domain_setmaxmem()
- * and libxl_wait_for_free_memory() will take a 64 bit value for the memory
- * size parameter.
- * From Xen 4.8 on libxl_get_memory_target(), libxl_domain_need_memory() and
- * libxl_get_free_memory() return the memory size in a 64 bit value, too.
- */
-#define LIBXL_HAVE_MEMKB_64BITS 1
-
-/*
- * LIBXL_HAVE_QED
- *
- * If this is defined QED disk formats can be used for both HVM and PV guests.
- */
-#define LIBXL_HAVE_QED 1
-
-/*
- * LIBXL_HAVE_SET_PARAMETERS
- *
- * If this is defined setting hypervisor parameters is supported.
- */
-#define LIBXL_HAVE_SET_PARAMETERS 1
-
-/*
- * LIBXL_HAVE_PV_SHIM
- *
- * If this is defined, libxl_domain_build_info's pvh type information
- * contains members pvshim, pvshim_path, pvshim_cmdline, pvshim_extra.
- */
-#define LIBXL_HAVE_PV_SHIM 1
-
-/*
- * LIBXL_HAVE_PVCALLS
- *
- * If this is defined, libxl supports creating pvcalls interfaces.
- */
-#define LIBXL_HAVE_PVCALLS 1
-
-/*
- * LIBXL_HAVE_FN_USING_QMP_ASYNC
- *
- * This define indicates that some function's API has changed and have an
- * extra parameter "ao_how" which means that the function can be executed
- * asynchronously. Those functions are:
- * libxl_domain_pause()
- * libxl_domain_unpause()
- * libxl_send_trigger()
- * libxl_set_vcpuonline()
- * libxl_retrieve_domain_configuration()
- * libxl_qemu_monitor_command()
- * libxl_domain_shutdown()
- * libxl_domain_reboot()
- */
-#define LIBXL_HAVE_FN_USING_QMP_ASYNC 1
-
-/*
- * LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONFIG
- *
- * If this is set, libxl_domain_need_memory takes a
- * libxl_domain_config* (non-const) and uint32_t domid_for_logging
- * (instead of a const libxl_domain_build_info*).
- *
- * If this is set, there is no need to call
- * libxl_get_required_shadow_memory and instead the caller should
- * simply leave shadow_memkb set to LIBXL_MEMKB_DEFAULT and allow
- * libxl to fill in a suitable default in the usual way.
- */
-#define LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONFIG
-
-/*
- * LIBXL_HAVE_CREATEINFO_DOMID
- *
- * libxl_domain_create_new() and libxl_domain_create_restore() will use
- * a domid specified in libxl_domain_create_info.
- */
-#define LIBXL_HAVE_CREATEINFO_DOMID
-
-/*
- * LIBXL_HAVE_CREATEINFO_XEND_SUSPEND_EVTCHN_COMPAT
- *
- * libxl_domain_create_info contains a boolean 'xend_suspend_evtchn_compat'
- * value to control creation of the xenstore path for a domain's suspend
- * event channel.
- */
-#define LIBXL_HAVE_CREATEINFO_XEND_SUSPEND_EVTCHN_COMPAT
-
-typedef char **libxl_string_list;
-void libxl_string_list_dispose(libxl_string_list *sl);
-int libxl_string_list_length(const libxl_string_list *sl);
-void libxl_string_list_copy(libxl_ctx *ctx, libxl_string_list *dst,
- const libxl_string_list *src);
-
-typedef char **libxl_key_value_list;
-void libxl_key_value_list_dispose(libxl_key_value_list *kvl);
-int libxl_key_value_list_length(const libxl_key_value_list *kvl);
-void libxl_key_value_list_copy(libxl_ctx *ctx,
- libxl_key_value_list *dst,
- const libxl_key_value_list *src);
-
-typedef uint32_t libxl_hwcap[8];
-void libxl_hwcap_copy(libxl_ctx *ctx, libxl_hwcap *dst, const libxl_hwcap *src);
-
-typedef uint64_t libxl_ev_user;
-
-typedef struct {
- uint32_t size; /* number of bytes in map */
- uint8_t *map;
-} libxl_bitmap;
-void libxl_bitmap_init(libxl_bitmap *map);
-void libxl_bitmap_dispose(libxl_bitmap *map);
-
-/*
- * libxl_cpuid_policy is opaque in the libxl ABI. Users of both libxl and
- * libxc may not make assumptions about xc_xend_cpuid.
- */
-typedef struct xc_xend_cpuid libxl_cpuid_policy;
-typedef libxl_cpuid_policy * libxl_cpuid_policy_list;
-void libxl_cpuid_dispose(libxl_cpuid_policy_list *cpuid_list);
-int libxl_cpuid_policy_list_length(const libxl_cpuid_policy_list *l);
-void libxl_cpuid_policy_list_copy(libxl_ctx *ctx,
- libxl_cpuid_policy_list *dst,
- const libxl_cpuid_policy_list *src);
-
-#define LIBXL_PCI_FUNC_ALL (~0U)
-
-typedef uint32_t libxl_domid;
-typedef int libxl_devid;
-
-/*
- * Formatting Enumerations.
- *
- * Each enumeration type libxl_E declares an associated lookup table
- * libxl_E_string_table and a lookup function libxl_E_from_string.
- */
-typedef struct {
- const char *s;
- int v;
-} libxl_enum_string_table;
-
-struct libxl_event;
-typedef LIBXL_TAILQ_ENTRY(struct libxl_event) libxl_ev_link;
-
-/*
- * A boolean variable with an explicit default state.
- *
- * Users should treat this struct as opaque and use the following
- * defined macros and accessor functions.
- *
- * To allow users of the library to naively select all defaults this
- * state is represented as 0. False is < 0 and True is > 0.
- */
-typedef struct {
- int val;
-} libxl_defbool;
-
-void libxl_defbool_set(libxl_defbool *db, bool b);
-/* Resets to default */
-void libxl_defbool_unset(libxl_defbool *db);
-/* Sets db only if it is currently == default */
-void libxl_defbool_setdefault(libxl_defbool *db, bool b);
-bool libxl_defbool_is_default(libxl_defbool db);
-/* db must not be == default */
-bool libxl_defbool_val(libxl_defbool db);
-
-const char *libxl_defbool_to_string(libxl_defbool b);
-
-#define LIBXL_TIMER_MODE_DEFAULT -1
-#define LIBXL_MEMKB_DEFAULT ~0ULL
-
-/*
- * We'd like to set a memory boundary to determine if we need to check
- * any overlap with reserved device memory.
- */
-#define LIBXL_RDM_MEM_BOUNDARY_MEMKB_DEFAULT (2048 * 1024)
-
-#define LIBXL_MS_VM_GENID_LEN 16
-typedef struct {
- uint8_t bytes[LIBXL_MS_VM_GENID_LEN];
-} libxl_ms_vm_genid;
-
-#include "_libxl_types.h"
-
-const libxl_version_info* libxl_get_version_info(libxl_ctx *ctx);
-
-/*
- * Some libxl operations can take a long time. These functions take a
- * parameter to control their concurrency:
- * libxl_asyncop_how *ao_how
- *
- * If ao_how==NULL, the function will be synchronous.
- *
- * If ao_how!=NULL, the function will set the operation going, and if
- * this is successful will return 0. In this case the zero error
- * response does NOT mean that the operation was successful; it just
- * means that it has been successfully started. It will finish later,
- * perhaps with an error.
- *
- * If ao_how->callback!=NULL, the callback will be called when the
- * operation completes. The same rules as for libxl_event_hooks
- * apply, including the reentrancy rules and the possibility of
- * "disaster", except that libxl calls ao_how->callback instead of
- * libxl_event_hooks.event_occurs. (See libxl_event.h.)
- *
- * If ao_how->callback==NULL, a libxl_event will be generated which
- * can be obtained from libxl_event_wait or libxl_event_check. The
- * event will have type OPERATION_COMPLETE (which is not used
- * elsewhere).
- *
- * Note that it is possible for an asynchronous operation which is to
- * result in a callback to complete during its initiating function
- * call. In this case the initiating function will return 0
- * indicating the at the operation is "in progress", even though by
- * the time it returns the operation is complete and the callback has
- * already happened.
- *
- * The application must set and use ao_how->for_event (which will be
- * copied into libxl_event.for_user) or ao_how->for_callback (passed
- * to the callback) to determine which operation finished, and it must
- * of course check the rc value for errors.
- *
- * *ao_how does not need to remain valid after the initiating function
- * returns. All other parameters must remain valid for the lifetime of
- * the asynchronous operation, unless otherwise specified.
- *
- * Callbacks may occur on any thread in which the application calls
- * libxl.
- */
-
-typedef struct {
- void (*callback)(libxl_ctx *ctx, int rc, void *for_callback);
- union {
- libxl_ev_user for_event; /* used if callback==NULL */
- void *for_callback; /* passed to callback */
- } u;
-} libxl_asyncop_how;
-
-/*
- * Some more complex asynchronous operations can report intermediate
- * progress. How this is to be reported is controlled, for each
- * function, by a parameter
- * libxl_asyncprogress_how *aop_FOO_how;
- * for each kind of progress FOO supported by that function. Each
- * such kind of progress is associated with an event type.
- *
- * The function description will document whether, when, and how
- * many times, the intermediate progress will be reported, and
- * what the corresponding event type(s) are.
- *
- * If aop_FOO_how==NULL, intermediate progress reports are discarded.
- *
- * If aop_FOO_how->callback==NULL, intermediate progress reports
- * generate libxl events which can be obtained from libxl_event_wait
- * or libxl_event_check.
- *
- * If aop_FOO_how->callback!=NULL, libxl will report intermediate
- * progress by calling callback(ctx, &event, for_callback).
- *
- * The rules for these events are otherwise the same as those for
- * ordinary events. The reentrancy and threading rules for the
- * callback are the same as those for ao completion callbacks.
- *
- * Note that the callback, if provided, is responsible for freeing
- * the event.
- *
- * If callbacks are requested, they will be made, and returned, before
- * the long-running libxl operation is considered finished (so if the
- * long-running libxl operation was invoked with ao_how==NULL then any
- * callbacks will occur strictly before the long-running operation
- * returns). However, the callbacks may occur on any thread.
- *
- * In general, otherwise, no promises are made about the relative
- * order of callbacks in a multithreaded program. In particular
- * different callbacks relating to the same long-running operation may
- * be delivered out of order.
- */
-
-typedef struct {
- void (*callback)(libxl_ctx *ctx, libxl_event*, void *for_callback);
- libxl_ev_user for_event; /* always used */
- void *for_callback; /* passed to callback */
-} libxl_asyncprogress_how;
-
-/*
- * It is sometimes possible to abort an asynchronous operation.
- *
- * libxl_ao_abort searches for an ongoing asynchronous operation whose
- * ao_how is identical to *how, and tries to abort it. The return
- * values from libxl_ao_abort are as follows:
- *
- * 0
- *
- * The operation was found, and attempts are being made to cut it
- * short. However, it may still take some time to stop. It is
- * also possible that the operation will nevertheless complete
- * successfully.
- *
- * ERROR_NOTFOUND
- *
- * No matching ongoing operation was found. This might happen
- * for an actual operation if the operation has already completed
- * (perhaps on another thread). The call to libxl_ao_abort has
- * had no effect.
- *
- * ERROR_ABORTED
- *
- * The operation has already been the subject of at least one
- * call to libxl_ao_abort.
- *
- * If the operation was indeed cut short due to the abort request, it
- * will complete, at some point in the future, with ERROR_ABORTED. In
- * that case, depending on the operation it have performed some of the
- * work in question and left the operation half-done. Consult the
- * documentation for individual operations.
- *
- * Note that an aborted operation might still fail for other reasons
- * even after the abort was requested.
- *
- * If your application is multithreaded you must not reuse an
- * ao_how->for_event or ao_how->for_callback value (with a particular
- * ao_how->callback) unless you are sure that none of your other
- * threads are going to abort the previous operation using that
- * value; otherwise you risk aborting the wrong operation if the
- * intended target of the abort request completes in the meantime.
- *
- * It is possible to abort even an operation which is being performed
- * synchronously, but since in that case how==NULL you had better only
- * have one such operation, because it is not possible to tell them
- * apart (and libxl_ao_abort will abort only the first one it finds).
- * (And, if you want to do this, obviously the abort would have to be
- * requested on a different thread.)
- */
-int libxl_ao_abort(libxl_ctx *ctx, const libxl_asyncop_how *how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-
-#define LIBXL_VERSION 0
-
-/* context functions */
-int libxl_ctx_alloc(libxl_ctx **pctx, int version,
- unsigned flags /* none currently defined */,
- xentoollog_logger *lg);
-int libxl_ctx_free(libxl_ctx *ctx /* 0 is OK */);
-
-/* domain related functions */
-
-#define INVALID_DOMID ~0
-#define RANDOM_DOMID (INVALID_DOMID - 1)
-
-/* If the result is ERROR_ABORTED, the domain may or may not exist
- * (in a half-created state). *domid will be valid and will be the
- * domain id, or INVALID_DOMID, as appropriate */
-
-int libxl_domain_create_new(libxl_ctx *ctx, libxl_domain_config *d_config,
- uint32_t *domid,
- const libxl_asyncop_how *ao_how,
- const libxl_asyncprogress_how *aop_console_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_domain_create_restore(libxl_ctx *ctx, libxl_domain_config *d_config,
- uint32_t *domid, int restore_fd,
- int send_back_fd,
- const libxl_domain_restore_params *params,
- const libxl_asyncop_how *ao_how,
- const libxl_asyncprogress_how *aop_console_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040400
-
-static inline int libxl_domain_create_restore_0x040200(
- libxl_ctx *ctx, libxl_domain_config *d_config,
- uint32_t *domid, int restore_fd,
- const libxl_asyncop_how *ao_how,
- const libxl_asyncprogress_how *aop_console_how)
- LIBXL_EXTERNAL_CALLERS_ONLY
-{
- libxl_domain_restore_params params;
- int ret;
-
- libxl_domain_restore_params_init(&params);
-
- ret = libxl_domain_create_restore(
- ctx, d_config, domid, restore_fd, -1, &params, ao_how, aop_console_how);
-
- libxl_domain_restore_params_dispose(&params);
- return ret;
-}
-
-#define libxl_domain_create_restore libxl_domain_create_restore_0x040200
-
-#elif defined(LIBXL_API_VERSION) && LIBXL_API_VERSION >= 0x040400 \
- && LIBXL_API_VERSION < 0x040700
-
-static inline int libxl_domain_create_restore_0x040400(
- libxl_ctx *ctx, libxl_domain_config *d_config,
- uint32_t *domid, int restore_fd,
- const libxl_domain_restore_params *params,
- const libxl_asyncop_how *ao_how,
- const libxl_asyncprogress_how *aop_console_how)
- LIBXL_EXTERNAL_CALLERS_ONLY
-{
- return libxl_domain_create_restore(ctx, d_config, domid, restore_fd,
- -1, params, ao_how, aop_console_how);
-}
-
-#define libxl_domain_create_restore libxl_domain_create_restore_0x040400
-
-#endif
-
-int libxl_domain_soft_reset(libxl_ctx *ctx,
- libxl_domain_config *d_config,
- uint32_t domid,
- const libxl_asyncop_how *ao_how,
- const libxl_asyncprogress_how
- *aop_console_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
- /* A progress report will be made via ao_console_how, of type
- * domain_create_console_available, when the domain's primary
- * console is available and can be connected to.
- */
-
-void libxl_domain_config_init(libxl_domain_config *d_config);
-void libxl_domain_config_dispose(libxl_domain_config *d_config);
-
-/*
- * Retrieve domain configuration and filled it in d_config. The
- * returned configuration can be used to rebuild a domain. It only
- * works with DomU.
- */
-int libxl_retrieve_domain_configuration(libxl_ctx *ctx, uint32_t domid,
- libxl_domain_config *d_config,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
-static inline int libxl_retrieve_domain_configuration_0x041200(
- libxl_ctx *ctx, uint32_t domid, libxl_domain_config *d_config)
-{
- return libxl_retrieve_domain_configuration(ctx, domid, d_config, NULL);
-}
-#define libxl_retrieve_domain_configuration \
- libxl_retrieve_domain_configuration_0x041200
-#endif
-
-int libxl_domain_suspend(libxl_ctx *ctx, uint32_t domid, int fd,
- int flags, /* LIBXL_SUSPEND_* */
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-#define LIBXL_SUSPEND_DEBUG 1
-#define LIBXL_SUSPEND_LIVE 2
-
-/*
- * Only suspend domain, do not save its state to file, do not destroy it.
- * Suspended domain can be resumed with libxl_domain_resume()
- */
-int libxl_domain_suspend_only(libxl_ctx *ctx, uint32_t domid,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* @param suspend_cancel [from xenctrl.h:xc_domain_resume( @param fast )]
- * If this parameter is true, use co-operative resume. The guest
- * must support this.
- */
-int libxl_domain_resume(libxl_ctx *ctx, uint32_t domid, int suspend_cancel,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/*
- * This function doesn't return unless something has gone wrong with
- * the replication to the secondary. If this function returns then the
- * caller should resume the (primary) domain.
- */
-int libxl_domain_remus_start(libxl_ctx *ctx, libxl_domain_remus_info *info,
- uint32_t domid, int send_fd, int recv_fd,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-int libxl_domain_shutdown(libxl_ctx *ctx, uint32_t domid,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_domain_reboot(libxl_ctx *ctx, uint32_t domid,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
-static inline int libxl_domain_shutdown_0x041200(libxl_ctx *ctx,
- uint32_t domid)
-{
- return libxl_domain_shutdown(ctx, domid, NULL);
-}
-#define libxl_domain_shutdown libxl_domain_shutdown_0x041200
-static inline int libxl_domain_reboot_0x041200(libxl_ctx *ctx,
- uint32_t domid)
-{
- return libxl_domain_reboot(ctx, domid, NULL);
-}
-#define libxl_domain_reboot libxl_domain_reboot_0x041200
-#endif
-
-int libxl_domain_destroy(libxl_ctx *ctx, uint32_t domid,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_domain_preserve(libxl_ctx *ctx, uint32_t domid, libxl_domain_create_info *info, const char *name_suffix, libxl_uuid new_uuid);
-
-/* get max. number of cpus supported by hypervisor */
-int libxl_get_max_cpus(libxl_ctx *ctx);
-
-/* get the actual number of currently online cpus on the host */
-int libxl_get_online_cpus(libxl_ctx *ctx);
- /* Beware that no locking or serialization is provided by libxl,
- * so the information can be outdated as far as the function
- * returns. If there are other entities in the system capable
- * of onlining/offlining CPUs, it is up to the application
- * to guarantee consistency, if that is important. */
-
-/* get max. number of NUMA nodes supported by hypervisor */
-int libxl_get_max_nodes(libxl_ctx *ctx);
-
-int libxl_domain_rename(libxl_ctx *ctx, uint32_t domid,
- const char *old_name, const char *new_name);
-
- /* if old_name is NULL, any old name is OK; otherwise we check
- * transactionally that the domain has the old old name; if
- * trans is not 0 we use caller's transaction and caller must do retries */
-
-int libxl_domain_pause(libxl_ctx *ctx, uint32_t domid,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_domain_unpause(libxl_ctx *ctx, uint32_t domid,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
-static inline int libxl_domain_pause_0x041200(
- libxl_ctx *ctx, uint32_t domid)
-{
- return libxl_domain_pause(ctx, domid, NULL);
-}
-static inline int libxl_domain_unpause_0x041200(
- libxl_ctx *ctx, uint32_t domid)
-{
- return libxl_domain_unpause(ctx, domid, NULL);
-}
-#define libxl_domain_pause libxl_domain_pause_0x041200
-#define libxl_domain_unpause libxl_domain_unpause_0x041200
-#endif
-
-
-int libxl_domain_core_dump(libxl_ctx *ctx, uint32_t domid,
- const char *filename,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-int libxl_domain_setmaxmem(libxl_ctx *ctx, uint32_t domid, uint64_t target_memkb);
-int libxl_set_memory_target(libxl_ctx *ctx, uint32_t domid, int64_t target_memkb, int relative, int enforce);
-int libxl_get_memory_target(libxl_ctx *ctx, uint32_t domid, uint64_t *out_target);
-int libxl_get_memory_target_0x040700(libxl_ctx *ctx, uint32_t domid,
- uint32_t *out_target)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/*
- * WARNING
- * This memory management API is unstable even in Xen 4.2.
- * It has a numer of deficiencies and we intend to replace it.
- *
- * The semantics of these functions should not be relied on to be very
- * coherent or stable. We will however endeavour to keep working
- * existing programs which use them in roughly the same way as libxl.
- */
-/* how much free memory in the system a domain needs to be built */
-int libxl_domain_need_memory(libxl_ctx *ctx,
- libxl_domain_config *config
- /* ^ will be partially defaulted */,
- uint32_t domid_for_logging /* INVALID_DOMID ok */,
- uint64_t *need_memkb);
-int libxl_domain_need_memory_0x041200(libxl_ctx *ctx,
- const libxl_domain_build_info *b_info_in,
- uint64_t *need_memkb);
-int libxl_domain_need_memory_0x040700(libxl_ctx *ctx,
- const libxl_domain_build_info *b_info_in,
- uint32_t *need_memkb)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-/* how much free memory is available in the system */
-int libxl_get_free_memory(libxl_ctx *ctx, uint64_t *memkb);
-int libxl_get_free_memory_0x040700(libxl_ctx *ctx, uint32_t *memkb)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-/* wait for a given amount of memory to be free in the system */
-int libxl_wait_for_free_memory(libxl_ctx *ctx, uint32_t domid, uint64_t memory_kb, int wait_secs);
-/*
- * Wait for the memory target of a domain to be reached. Does not
- * decrement wait_secs if the domain is making progress toward reaching
- * the target. If the domain is not making progress, wait_secs is
- * decremented. If the timeout expires before the target is reached, the
- * function returns ERROR_FAIL.
- *
- * Older versions of this function (Xen 4.5 and older), decremented
- * wait_secs even if the domain was making progress, resulting in far
- * lower overall wait times. To make sure that your calling routine
- * works with new and old implementations of the function, pass enough
- * time for the guest to reach its target as an argument.
- */
-int libxl_wait_for_memory_target(libxl_ctx *ctx, uint32_t domid, int wait_secs);
-
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040800
-#define libxl_get_memory_target libxl_get_memory_target_0x040700
-#define libxl_domain_need_memory libxl_domain_need_memory_0x040700
-#define libxl_get_free_memory libxl_get_free_memory_0x040700
-#elif defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
-#define libxl_domain_need_memory libxl_domain_need_memory_0x041200
-#endif
-
-int libxl_vncviewer_exec(libxl_ctx *ctx, uint32_t domid, int autopass);
-
-/*
- * If notify_fd is not -1, xenconsole will write 0x00 to it to nofity
- * the caller that it has connected to the guest console.
- */
-int libxl_console_exec(libxl_ctx *ctx, uint32_t domid, int cons_num,
- libxl_console_type type, int notify_fd);
-/* libxl_primary_console_exec finds the domid and console number
- * corresponding to the primary console of the given vm, then calls
- * libxl_console_exec with the right arguments (domid might be different
- * if the guest is using stubdoms).
- * This function can be called after creating the device model, in
- * case of HVM guests, and before libxl_run_bootloader in case of PV
- * guests using pygrub.
- * If notify_fd is not -1, xenconsole will write 0x00 to it to nofity
- * the caller that it has connected to the guest console.
- */
-int libxl_primary_console_exec(libxl_ctx *ctx, uint32_t domid_vm,
- int notify_fd);
-
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040800
-
-static inline int libxl_console_exec_0x040700(libxl_ctx *ctx,
- uint32_t domid, int cons_num,
- libxl_console_type type)
-{
- return libxl_console_exec(ctx, domid, cons_num, type, -1);
-}
-#define libxl_console_exec libxl_console_exec_0x040700
-
-static inline int libxl_primary_console_exec_0x040700(libxl_ctx *ctx,
- uint32_t domid_vm)
-{
- return libxl_primary_console_exec(ctx, domid_vm, -1);
-}
-#define libxl_primary_console_exec libxl_primary_console_exec_0x040700
-
-#endif
-
-/* libxl_console_get_tty retrieves the specified domain's console tty path
- * and stores it in path. Caller is responsible for freeing the memory.
- */
-int libxl_console_get_tty(libxl_ctx *ctx, uint32_t domid, int cons_num,
- libxl_console_type type, char **path);
-
-/* libxl_primary_console_get_tty retrieves the specified domain's primary
- * console tty path and stores it in path. Caller is responsible for freeing
- * the memory.
- */
-int libxl_primary_console_get_tty(libxl_ctx *ctx, uint32_t domid_vm, char **path);
-
-/* May be called with info_r == NULL to check for domain's existence.
- * Returns ERROR_DOMAIN_NOTFOUND if domain does not exist (used to return
- * ERROR_INVAL for this scenario). */
-int libxl_domain_info(libxl_ctx*, libxl_dominfo *info_r,
- uint32_t domid);
-
-/* These functions each return (on success) an array of elements,
- * and the length via the int* out parameter. These arrays and
- * their contents come from malloc, and must be freed with the
- * corresponding libxl_THING_list_free function.
- */
-libxl_dominfo * libxl_list_domain(libxl_ctx*, int *nb_domain_out);
-void libxl_dominfo_list_free(libxl_dominfo *list, int nb_domain);
-
-libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx*, int *nb_pool_out);
-void libxl_cpupoolinfo_list_free(libxl_cpupoolinfo *list, int nb_pool);
-
-libxl_vminfo * libxl_list_vm(libxl_ctx *ctx, int *nb_vm_out);
-void libxl_vminfo_list_free(libxl_vminfo *list, int nb_vm);
-
-#define LIBXL_CPUTOPOLOGY_INVALID_ENTRY (~(uint32_t)0)
-libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out);
-void libxl_cputopology_list_free(libxl_cputopology *, int nb_cpu);
-
-#define LIBXL_PCITOPOLOGY_INVALID_ENTRY (~(uint32_t)0)
-libxl_pcitopology *libxl_get_pci_topology(libxl_ctx *ctx, int *num_devs);
-void libxl_pcitopology_list_free(libxl_pcitopology *, int num_devs);
-
-#define LIBXL_NUMAINFO_INVALID_ENTRY (~(uint32_t)0)
-libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr);
-void libxl_numainfo_list_free(libxl_numainfo *, int nr);
-
-libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
- int *nb_vcpu, int *nr_cpus_out);
-void libxl_vcpuinfo_list_free(libxl_vcpuinfo *, int nr_vcpus);
-
-/*
- * Devices
- * =======
- *
- * Each device is represented by a libxl_device_<TYPE> data structure
- * which is defined via the IDL. In addition some devices have an
- * additional data type libxl_device_<TYPE>_getinfo which contains
- * further runtime information about the device.
- *
- * In addition to the general methods available for libxl types (see
- * "libxl types" above) a common set of methods are available for each
- * device type. These are described below.
- *
- * Querying
- * --------
- *
- * libxl_device_<type>_list(ctx, domid, nr):
- *
- * Returns an array of libxl_device_<type> length nr representing
- * the devices attached to the specified domain.
- *
- * libxl_device_<type>_getinfo(ctx, domid, device, info):
- *
- * Initialises info with details of the given device which must be
- * attached to the specified domain.
- *
- * Creation / Control
- * ------------------
- *
- * libxl_device_<type>_add(ctx, domid, device):
- *
- * Adds the given device to the specified domain. This can be called
- * while the guest is running (hotplug) or before boot (coldplug).
- *
- * This function only sets up the device but does not wait for the
- * domain to connect to the device and therefore cannot block on the
- * guest.
- *
- * device is an in/out parameter: fields left unspecified when the
- * structure is passed in are filled in with appropriate values for
- * the device created.
- *
- * libxl_device_<type>_destroy(ctx, domid, device):
- *
- * Removes the given device from the specified domain without guest
- * co-operation. It is guest specific what affect this will have on
- * a running guest.
- *
- * This function does not interact with the guest and therefore
- * cannot block on the guest.
- *
- * libxl_device_<type>_remove(ctx, domid, device):
- *
- * Removes the given device from the specified domain by performing
- * an orderly unplug with guest co-operation. This requires that the
- * guest is running.
- *
- * This method is currently synchronous and therefore can block
- * while interacting with the guest. There is a time-out of 10s on
- * this interaction after which libxl_device_<type>_destroy()
- * semantics apply.
- *
- * libxl_device_<type>_safe_remove(ctx, domid, device):
- *
- * This has the same semantics as libxl_device_<type>_remove() but,
- * in the event of hitting the 10s time-out, this function will fail.
- *
- * Controllers
- * -----------
- *
- * Most devices are treated individually. Some classes of device,
- * however, like USB or SCSI, inherently have the need to have a
- * hierarchy of different levels, with lower-level devices "attached"
- * to higher-level ones. USB for instance has "controllers" at the
- * top, which have buses, on which are devices, which consist of
- * multiple interfaces. SCSI has "hosts" at the top, then buses,
- * targets, and LUNs.
- *
- * In that case, for each <class>, there will be a set of functions
- * and types for each <level>. For example, for <class>=usb, there
- * may be <levels> ctrl (controller) and dev (device), with ctrl being
- * level 0.
- *
- * libxl_device_<class><level0>_<function> will act more or
- * less like top-level non-bus devices: they will either create or
- * accept a libxl_devid which will be unique within the
- * <class><level0> libxl_devid namespace.
- *
- * Lower-level devices must have a unique way to be identified. One
- * way to do this would be to name it via the name of the next level
- * up plus an index; for instance, <ctrl devid, port number>. Another
- * way would be to have another devid namespace for that level. This
- * identifier will be used for queries and removals.
- *
- * Lower-level devices will include in their
- * libxl_device_<class><level> struct a field referring to the unique
- * index of the level above. For instance, libxl_device_usbdev might
- * contain the controller devid.
- *
- * In the case where there are multiple different ways to implement a
- * given device -- for instance, one which is fully PV and one which
- * uses an emulator -- the controller will contain a field which
- * specifies what type of implementation is used. The implementations
- * of individual devices will be known by the controller to which they
- * are attached.
- *
- * If libxl_device_<class><level>_add receives an empty reference to
- * the level above, it may return an error. Or it may (but is not
- * required to) automatically choose a suitable device in the level
- * above to which to attach the new device at this level. It may also
- * (but is not required to) automatically create a new device at the
- * level above if no suitable devices exist. Each class should
- * document its behavior.
- *
- * libxl_device_<class><level>_list will list all devices of <class>
- * at <level> in the domain. For example, libxl_device_usbctrl_list
- * will list all usb controllers; libxl_class_usbdev_list will list
- * all usb devices across all controllers.
- *
- * For each class, the domain config file will contain a single list
- * for each level. libxl will first iterate through the list of
- * top-level devices, then iterate through each level down in turn,
- * adding devices to devices in the level above. For instance, there
- * will be one list for all usb controllers, and one list for all usb
- * devices.
- *
- * If libxl_device_<class><level>_add automatically creates
- * higher-level devices as necessary, then it is permissible for the
- * higher-level lists to be empty and the device list to have devices
- * with the field containing a reference to the higher level device
- * uninitialized.
- */
-
-/* Disks */
-int libxl_device_disk_add(libxl_ctx *ctx, uint32_t domid,
- libxl_device_disk *disk,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_disk_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_disk *disk,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_disk_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_disk *disk,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_disk_safe_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_disk *disk,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_disk *libxl_device_disk_list(libxl_ctx *ctx,
- uint32_t domid, int *num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-void libxl_device_disk_list_free(libxl_device_disk* list, int num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_disk_getinfo(libxl_ctx *ctx, uint32_t domid,
- const libxl_device_disk *disk, libxl_diskinfo *diskinfo)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/*
- * Insert a CD-ROM device. A device corresponding to disk must already
- * be attached to the guest.
- */
-int libxl_cdrom_insert(libxl_ctx *ctx, uint32_t domid, libxl_device_disk *disk,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/*
- * USB
- *
- * For each device removed or added, one of these protocols is available:
- * - PV (i.e., PVUSB)
- * - DEVICEMODEL (i.e, qemu)
- *
- * PV is available for either PV or HVM domains. DEVICEMODEL is only
- * available for HVM domains. The caller can additionally specify
- * "AUTO", in which case the library will try to determine the best
- * protocol automatically.
- *
- * At the moment, the only protocol implemented is PV.
- *
- * One can add/remove USB controllers to/from guest, and attach/detach USB
- * devices to/from USB controllers.
- *
- * To add USB controllers and USB devices, one can adding USB controllers
- * first and then attaching USB devices to some USB controller, or adding
- * USB devices to guest directly, it will automatically create a USB
- * controller for USB devices to attach.
- *
- * To remove USB controllers or USB devices, one can remove USB devices
- * under USB controller one by one and then remove USB controller, or
- * remove USB controller directly, it will remove all USB devices under
- * it automatically.
- *
- */
-/* USB Controllers*/
-int libxl_device_usbctrl_add(libxl_ctx *ctx, uint32_t domid,
- libxl_device_usbctrl *usbctrl,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-int libxl_device_usbctrl_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_usbctrl *usbctrl,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-int libxl_device_usbctrl_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_usbctrl *usbctrl,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_usbctrl *libxl_device_usbctrl_list(libxl_ctx *ctx,
- uint32_t domid, int *num);
-
-void libxl_device_usbctrl_list_free(libxl_device_usbctrl *list, int nr);
-
-
-int libxl_device_usbctrl_getinfo(libxl_ctx *ctx, uint32_t domid,
- const libxl_device_usbctrl *usbctrl,
- libxl_usbctrlinfo *usbctrlinfo);
-
-/* USB Devices */
-
-int libxl_device_usbdev_add(libxl_ctx *ctx, uint32_t domid,
- libxl_device_usbdev *usbdev,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-int libxl_device_usbdev_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_usbdev *usbdev,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_usbdev *
-libxl_device_usbdev_list(libxl_ctx *ctx, uint32_t domid, int *num);
-
-void libxl_device_usbdev_list_free(libxl_device_usbdev *list, int nr);
-
-/* Network Interfaces */
-int libxl_device_nic_add(libxl_ctx *ctx, uint32_t domid, libxl_device_nic *nic,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_nic_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_nic *nic,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_nic_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_nic *nic,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_nic *libxl_device_nic_list(libxl_ctx *ctx,
- uint32_t domid, int *num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-void libxl_device_nic_list_free(libxl_device_nic* list, int num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_nic_getinfo(libxl_ctx *ctx, uint32_t domid,
- const libxl_device_nic *nic, libxl_nicinfo *nicinfo)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/*
- * Virtual Channels
- * Channels manifest as consoles with names, see docs/misc/channels.txt
- */
-libxl_device_channel *libxl_device_channel_list(libxl_ctx *ctx,
- uint32_t domid,
- int *num);
-int libxl_device_channel_getinfo(libxl_ctx *ctx, uint32_t domid,
- const libxl_device_channel *channel,
- libxl_channelinfo *channelinfo);
-
-/* Virtual TPMs */
-int libxl_device_vtpm_add(libxl_ctx *ctx, uint32_t domid, libxl_device_vtpm *vtpm,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vtpm_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vtpm *vtpm,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vtpm_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vtpm *vtpm,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_vtpm *libxl_device_vtpm_list(libxl_ctx *ctx,
- uint32_t domid, int *num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-void libxl_device_vtpm_list_free(libxl_device_vtpm*, int num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vtpm_getinfo(libxl_ctx *ctx, uint32_t domid,
- const libxl_device_vtpm *vtpm, libxl_vtpminfo *vtpminfo)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* Virtual displays */
-int libxl_device_vdispl_add(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vdispl *displ,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vdispl_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vdispl *vdispl,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vdispl_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vdispl *vdispl,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_vdispl *libxl_device_vdispl_list(libxl_ctx *ctx,
- uint32_t domid, int *num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-void libxl_device_vdispl_list_free(libxl_device_vdispl* list, int num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vdispl_getinfo(libxl_ctx *ctx, uint32_t domid,
- const libxl_device_vdispl *vdispl,
- libxl_vdisplinfo *vdisplinfo)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* Virtual sounds */
-int libxl_device_vsnd_add(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vsnd *vsnd,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vsnd_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vsnd *vsnd,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vsnd_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vsnd *vsnd,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_vsnd *libxl_device_vsnd_list(libxl_ctx *ctx,
- uint32_t domid, int *num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-void libxl_device_vsnd_list_free(libxl_device_vsnd* list, int num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vsnd_getinfo(libxl_ctx *ctx, uint32_t domid,
- const libxl_device_vsnd *vsnd,
- libxl_vsndinfo *vsndlinfo)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* Keyboard */
-int libxl_device_vkb_add(libxl_ctx *ctx, uint32_t domid, libxl_device_vkb *vkb,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vkb_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vkb *vkb,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vkb_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vkb *vkb,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_vkb *libxl_device_vkb_list(libxl_ctx *ctx,
- uint32_t domid, int *num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-void libxl_device_vkb_list_free(libxl_device_vkb* list, int num)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vkb_getinfo(libxl_ctx *ctx, uint32_t domid,
- const libxl_device_vkb *vkb,
- libxl_vkbinfo *vkbinfo)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* Framebuffer */
-int libxl_device_vfb_add(libxl_ctx *ctx, uint32_t domid, libxl_device_vfb *vfb,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vfb_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vfb *vfb,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_vfb_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_vfb *vfb,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* 9pfs */
-int libxl_device_p9_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_p9 *p9,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_p9_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_p9 *p9,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* pvcalls interface */
-int libxl_device_pvcallsif_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_pvcallsif *pvcallsif,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_pvcallsif_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_pvcallsif *pvcallsif,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* PCI Passthrough */
-int libxl_device_pci_add(libxl_ctx *ctx, uint32_t domid,
- libxl_device_pci *pcidev,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_pci_remove(libxl_ctx *ctx, uint32_t domid,
- libxl_device_pci *pcidev,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-int libxl_device_pci_destroy(libxl_ctx *ctx, uint32_t domid,
- libxl_device_pci *pcidev,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-libxl_device_pci *libxl_device_pci_list(libxl_ctx *ctx, uint32_t domid,
- int *num);
-
-/*
- * Turns the current process into a backend device service daemon
- * for a driver domain.
- *
- * From a libxl API point of view, this starts a long-running
- * operation. That operation consists of "being a driver domain"
- * and never completes.
- *
- * Attempting to abort this operation is not advisable; proper
- * shutdown of the driver domain task is not supported.
- */
-int libxl_device_events_handler(libxl_ctx *ctx,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/*
- * Functions related to making devices assignable -- that is, bound to
- * the pciback driver, ready to be given to a guest via
- * libxl_pci_device_add.
- *
- * - ..._add() will unbind the device from its current driver (if
- * already bound) and re-bind it to pciback; at that point it will be
- * ready to be assigned to a VM. If rebind is set, it will store the
- * path to the old driver in xenstore so that it can be handed back to
- * dom0 on restore.
- *
- * - ..._remove() will unbind the device from pciback, and if
- * rebind is non-zero, attempt to assign it back to the driver
- * from whence it came.
- *
- * - ..._list() will return a list of the PCI devices available to be
- * assigned.
- *
- * add and remove are idempotent: if the device in question is already
- * added or is not bound, the functions will emit a warning but return
- * SUCCESS.
- */
-int libxl_device_pci_assignable_add(libxl_ctx *ctx, libxl_device_pci *pcidev, int rebind);
-int libxl_device_pci_assignable_remove(libxl_ctx *ctx, libxl_device_pci *pcidev, int rebind);
-libxl_device_pci *libxl_device_pci_assignable_list(libxl_ctx *ctx, int *num);
-
-/* CPUID handling */
-int libxl_cpuid_parse_config(libxl_cpuid_policy_list *cpuid, const char* str);
-int libxl_cpuid_parse_config_xend(libxl_cpuid_policy_list *cpuid,
- const char* str);
-#if LIBXL_API_VERSION < 0x041400
-/*
- * Dropped from the API in Xen 4.14. At the time of writing, these functions
- * don't appear to ever have had external callers.
- *
- * These have always been used internally during domain construction, and
- * can't easily be used externally because of their implicit parameters in
- * other pieces of global state.
- *
- * Furthermore, an API user can't usefully determine whether they get
- * libxl_cpuid (the real implementation) or libxl_nocpuid (no-op stubs).
- *
- * The internal behaviour of these functions also needs to change. Therefore
- * for simplicitly, provide the no-op stubs. Yes technically this is an API
- * change in some cases for existing software, but there is 0 of that in
- * practice.
- */
-static inline void libxl_cpuid_apply_policy(libxl_ctx *ctx __attribute__((unused)),
- uint32_t domid __attribute__((unused)))
-{}
-static inline void libxl_cpuid_set(libxl_ctx *ctx __attribute__((unused)),
- uint32_t domid __attribute__((unused)),
- libxl_cpuid_policy_list cpuid __attribute__((unused)))
-{}
-#endif
-
-/*
- * Functions for allowing users of libxl to store private data
- * relating to a domain. The data is an opaque sequence of bytes and
- * is not interpreted or used by libxl.
- *
- * Data is indexed by the userdata userid, which is a short printable
- * ASCII string. The following list is a registry of userdata userids
- * (the registry may be updated by posting a patch to xen-devel):
- *
- * userid Data contents
- * "xl" domain config file in xl format, Unix line endings
- * "libvirt-xml" domain config file in libvirt XML format. See
- * http://libvirt.org/formatdomain.html
- * "domain-userdata-lock" lock file to protect domain userdata in libxl.
- * It's a per-domain lock. Applications should
- * not touch this file.
- * "libxl-json" libxl_domain_config object in JSON format, generated
- * by libxl. Applications should not access this file
- * directly. This file is protected by domain-userdata-lock
- * for against Read-Modify-Write operation and domain
- * destruction.
- *
- * libxl does not enforce the registration of userdata userids or the
- * semantics of the data. For specifications of the data formats
- * see the code or documentation for the libxl caller in question.
- */
-int libxl_userdata_store(libxl_ctx *ctx, uint32_t domid,
- const char *userdata_userid,
- const uint8_t *data, int datalen)
- LIBXL_EXTERNAL_CALLERS_ONLY;
- /* If datalen==0, data is not used and the user data for
- * that domain and userdata_userid is deleted. */
-int libxl_userdata_retrieve(libxl_ctx *ctx, uint32_t domid,
- const char *userdata_userid,
- uint8_t **data_r, int *datalen_r)
- LIBXL_EXTERNAL_CALLERS_ONLY;
- /* On successful return, *data_r is from malloc.
- * If there is no data for that domain and userdata_userid,
- * *data_r and *datalen_r will be set to 0.
- * data_r and datalen_r may be 0.
- * On error return, *data_r and *datalen_r are undefined.
- */
-int libxl_userdata_unlink(libxl_ctx *ctx, uint32_t domid,
- const char *userdata_userid);
-
-
-int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo);
-int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
- const libxl_bitmap *cpumap_hard,
- const libxl_bitmap *cpumap_soft);
-int libxl_set_vcpuaffinity_force(libxl_ctx *ctx, uint32_t domid,
- uint32_t vcpuid,
- const libxl_bitmap *cpumap_hard,
- const libxl_bitmap *cpumap_soft);
-int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
- unsigned int max_vcpus,
- const libxl_bitmap *cpumap_hard,
- const libxl_bitmap *cpumap_soft);
-
-#if defined (LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040500
-
-#define libxl_set_vcpuaffinity(ctx, domid, vcpuid, map) \
- libxl_set_vcpuaffinity((ctx), (domid), (vcpuid), (map), NULL)
-#define libxl_set_vcpuaffinity_all(ctx, domid, max_vcpus, map) \
- libxl_set_vcpuaffinity_all((ctx), (domid), (max_vcpus), (map), NULL)
-
-#endif
-
-int libxl_domain_set_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
- libxl_bitmap *nodemap);
-int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
- libxl_bitmap *nodemap);
-int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid,
- libxl_bitmap *cpumap,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
-static inline int libxl_set_vcpuonline_0x041200(libxl_ctx *ctx,
- uint32_t domid,
- libxl_bitmap *cpumap)
-{
- return libxl_set_vcpuonline(ctx, domid, cpumap, NULL);
-}
-#define libxl_set_vcpuonline libxl_set_vcpuonline_0x041200
-#endif
-
-/* A return value less than 0 should be interpreted as a libxl_error, while a
- * return value greater than or equal to 0 should be interpreted as a
- * libxl_scheduler. */
-int libxl_get_scheduler(libxl_ctx *ctx);
-
-/* Per-scheduler parameters */
-int libxl_sched_credit_params_get(libxl_ctx *ctx, uint32_t poolid,
- libxl_sched_credit_params *scinfo);
-int libxl_sched_credit_params_set(libxl_ctx *ctx, uint32_t poolid,
- libxl_sched_credit_params *scinfo);
-int libxl_sched_credit2_params_get(libxl_ctx *ctx, uint32_t poolid,
- libxl_sched_credit2_params *scinfo);
-int libxl_sched_credit2_params_set(libxl_ctx *ctx, uint32_t poolid,
- libxl_sched_credit2_params *scinfo);
-
-/* Scheduler Per-domain parameters */
-
-#define LIBXL_DOMAIN_SCHED_PARAM_WEIGHT_DEFAULT -1
-#define LIBXL_DOMAIN_SCHED_PARAM_CAP_DEFAULT -1
-#define LIBXL_DOMAIN_SCHED_PARAM_PERIOD_DEFAULT -1
-#define LIBXL_DOMAIN_SCHED_PARAM_SLICE_DEFAULT -1
-#define LIBXL_DOMAIN_SCHED_PARAM_LATENCY_DEFAULT -1
-#define LIBXL_DOMAIN_SCHED_PARAM_EXTRATIME_DEFAULT -1
-#define LIBXL_DOMAIN_SCHED_PARAM_BUDGET_DEFAULT -1
-
-/* Per-VCPU parameters */
-#define LIBXL_SCHED_PARAM_VCPU_INDEX_DEFAULT -1
-
-/* Get the per-domain scheduling parameters.
- * For schedulers that support per-vcpu settings (e.g., RTDS),
- * calling *_domain_get functions will get default scheduling
- * parameters.
- */
-int libxl_domain_sched_params_get(libxl_ctx *ctx, uint32_t domid,
- libxl_domain_sched_params *params);
-
-/* Set the per-domain scheduling parameters.
- * For schedulers that support per-vcpu settings (e.g., RTDS),
- * calling *_domain_set functions will set all vcpus with the same
- * scheduling parameters.
- */
-int libxl_domain_sched_params_set(libxl_ctx *ctx, uint32_t domid,
- const libxl_domain_sched_params *params);
-
-/* Get the per-vcpu scheduling parameters */
-int libxl_vcpu_sched_params_get(libxl_ctx *ctx, uint32_t domid,
- libxl_vcpu_sched_params *params);
-
-/* Get the per-vcpu scheduling parameters of all vcpus of a domain */
-int libxl_vcpu_sched_params_get_all(libxl_ctx *ctx, uint32_t domid,
- libxl_vcpu_sched_params *params);
-
-/* Set the per-vcpu scheduling parameters */
-int libxl_vcpu_sched_params_set(libxl_ctx *ctx, uint32_t domid,
- const libxl_vcpu_sched_params *params);
-
-/* Set the per-vcpu scheduling parameters of all vcpus of a domain */
-int libxl_vcpu_sched_params_set_all(libxl_ctx *ctx, uint32_t domid,
- const libxl_vcpu_sched_params *params);
-
-int libxl_send_trigger(libxl_ctx *ctx, uint32_t domid,
- libxl_trigger trigger, uint32_t vcpuid,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
-static inline int libxl_send_trigger_0x041200(
- libxl_ctx *ctx, uint32_t domid, libxl_trigger trigger, uint32_t vcpuid)
-{
- return libxl_send_trigger(ctx, domid, trigger, vcpuid, NULL);
-}
-#define libxl_send_trigger libxl_send_trigger_0x041200
-#endif
-int libxl_send_sysrq(libxl_ctx *ctx, uint32_t domid, char sysrq);
-int libxl_send_debug_keys(libxl_ctx *ctx, char *keys);
-int libxl_set_parameters(libxl_ctx *ctx, char *params);
-
-typedef struct libxl__xen_console_reader libxl_xen_console_reader;
-
-libxl_xen_console_reader *
- libxl_xen_console_read_start(libxl_ctx *ctx, int clear);
-int libxl_xen_console_read_line(libxl_ctx *ctx,
- libxl_xen_console_reader *cr,
- char **line_r);
-void libxl_xen_console_read_finish(libxl_ctx *ctx,
- libxl_xen_console_reader *cr);
-
-uint32_t libxl_vm_get_start_time(libxl_ctx *ctx, uint32_t domid);
-
-char *libxl_tmem_list(libxl_ctx *ctx, uint32_t domid, int use_long);
-int libxl_tmem_freeze(libxl_ctx *ctx, uint32_t domid);
-int libxl_tmem_thaw(libxl_ctx *ctx, uint32_t domid);
-int libxl_tmem_set(libxl_ctx *ctx, uint32_t domid, char* name,
- uint32_t set);
-int libxl_tmem_shared_auth(libxl_ctx *ctx, uint32_t domid, char* uuid,
- int auth);
-int libxl_tmem_freeable(libxl_ctx *ctx);
-
-int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap);
-
-/*
- * Set poolid to LIBXL_CPUOOL_POOLID_ANY to have Xen choose a
- * free poolid for you.
- */
-#define LIBXL_CPUPOOL_POOLID_ANY 0xFFFFFFFF
-int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
- libxl_scheduler sched,
- libxl_bitmap cpumap, libxl_uuid *uuid,
- uint32_t *poolid);
-int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid);
-int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid);
-int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
-int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
-int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
- const libxl_bitmap *cpumap);
-int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
-int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
-int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
- const libxl_bitmap *cpumap);
-int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
-int libxl_cpupool_info(libxl_ctx *ctx, libxl_cpupoolinfo *info, uint32_t poolid);
-
-int libxl_domid_valid_guest(uint32_t domid);
-
-int libxl_flask_context_to_sid(libxl_ctx *ctx, char *buf, size_t len,
- uint32_t *ssidref);
-int libxl_flask_sid_to_context(libxl_ctx *ctx, uint32_t ssidref, char **buf,
- size_t *len);
-int libxl_flask_getenforce(libxl_ctx *ctx);
-int libxl_flask_setenforce(libxl_ctx *ctx, int mode);
-int libxl_flask_loadpolicy(libxl_ctx *ctx, void *policy, uint32_t size);
-
-int libxl_ms_vm_genid_generate(libxl_ctx *ctx, libxl_ms_vm_genid *id);
-bool libxl_ms_vm_genid_is_zero(const libxl_ms_vm_genid *id);
-void libxl_ms_vm_genid_copy(libxl_ctx *ctx, libxl_ms_vm_genid *dst,
- const libxl_ms_vm_genid *src);
-
-#if defined(__i386__) || defined(__x86_64__)
-int libxl_psr_cmt_attach(libxl_ctx *ctx, uint32_t domid);
-int libxl_psr_cmt_detach(libxl_ctx *ctx, uint32_t domid);
-int libxl_psr_cmt_domain_attached(libxl_ctx *ctx, uint32_t domid);
-int libxl_psr_cmt_enabled(libxl_ctx *ctx);
-int libxl_psr_cmt_get_total_rmid(libxl_ctx *ctx, uint32_t *total_rmid);
-int libxl_psr_cmt_get_l3_cache_size(libxl_ctx *ctx,
- uint32_t socketid,
- uint32_t *l3_cache_size);
-int libxl_psr_cmt_get_cache_occupancy(libxl_ctx *ctx,
- uint32_t domid,
- uint32_t socketid,
- uint32_t *l3_cache_occupancy);
-
-int libxl_psr_cmt_type_supported(libxl_ctx *ctx, libxl_psr_cmt_type type);
-int libxl_psr_cmt_get_sample(libxl_ctx *ctx,
- uint32_t domid,
- libxl_psr_cmt_type type,
- uint64_t scope,
- uint64_t *sample_r,
- uint64_t *tsc_r);
-
-/*
- * Function to set a domain's cbm. It operates on a single or multiple
- * target(s) defined in 'target_map'. The definition of 'target_map' is
- * related to 'type':
- * 'L3_CBM': 'target_map' specifies all the sockets to be operated on.
- */
-int libxl_psr_cat_set_cbm(libxl_ctx *ctx, uint32_t domid,
- libxl_psr_cbm_type type, libxl_bitmap *target_map,
- uint64_t cbm);
-/*
- * Function to get a domain's cbm. It operates on a single 'target'.
- * The definition of 'target' is related to 'type':
- * 'L3_CBM': 'target' specifies which socket to be operated on.
- */
-int libxl_psr_cat_get_cbm(libxl_ctx *ctx, uint32_t domid,
- libxl_psr_cbm_type type, uint32_t target,
- uint64_t *cbm_r);
-
-/*
- * On success, the function returns an array of elements in 'info',
- * and the length in 'nr'.
- */
-int libxl_psr_cat_get_info(libxl_ctx *ctx, libxl_psr_cat_info **info,
- unsigned int *nr, unsigned int lvl);
-int libxl_psr_cat_get_l3_info(libxl_ctx *ctx, libxl_psr_cat_info **info,
- int *nr);
-void libxl_psr_cat_info_list_free(libxl_psr_cat_info *list, int nr);
-
-typedef enum libxl_psr_cbm_type libxl_psr_type;
-
-/*
- * Function to set a domain's value. It operates on a single or multiple
- * target(s) defined in 'target_map'. 'target_map' specifies all the sockets
- * to be operated on.
- */
-int libxl_psr_set_val(libxl_ctx *ctx, uint32_t domid,
- libxl_psr_type type, libxl_bitmap *target_map,
- uint64_t val);
-/*
- * Function to get a domain's cbm. It operates on a single 'target'.
- * 'target' specifies which socket to be operated on.
- */
-int libxl_psr_get_val(libxl_ctx *ctx, uint32_t domid,
- libxl_psr_type type, unsigned int target,
- uint64_t *val);
-/*
- * On success, the function returns an array of elements in 'info',
- * and the length in 'nr'.
- */
-int libxl_psr_get_hw_info(libxl_ctx *ctx, libxl_psr_feat_type type,
- unsigned int lvl, unsigned int *nr,
- libxl_psr_hw_info **info);
-void libxl_psr_hw_info_list_free(libxl_psr_hw_info *list, unsigned int nr);
-#endif
-
-/* misc */
-
-/* Each of these sets or clears the flag according to whether the
- * 2nd parameter is nonzero. On failure, they log, and
- * return ERROR_FAIL, but also leave errno valid. */
-int libxl_fd_set_cloexec(libxl_ctx *ctx, int fd, int cloexec);
-int libxl_fd_set_nonblock(libxl_ctx *ctx, int fd, int nonblock);
-
-/*
- * Issue a qmp monitor command to the device model of the specified domain.
- * The function returns the output of the command in a new allocated buffer
- * via output.
- */
-int libxl_qemu_monitor_command(libxl_ctx *ctx, uint32_t domid,
- const char *command_line, char **output,
- const libxl_asyncop_how *ao_how)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x041300
-static inline int libxl_qemu_monitor_command_0x041200(libxl_ctx *ctx,
- uint32_t domid, const char *command_line, char **output)
-{
- return libxl_qemu_monitor_command(ctx, domid, command_line, output,
- NULL);
-}
-#define libxl_qemu_monitor_command libxl_qemu_monitor_command_0x041200
-#endif
-
-#include <libxl_event.h>
-
-/*
- * This function is for use only during host initialisation. If it is
- * invoked on a host with running domains, or concurrent libxl
- * processes then the system may malfuntion.
- */
-int libxl_clear_domid_history(libxl_ctx *ctx);
-
-#endif /* LIBXL_H */
-
-/*
- * Local variables:
- * mode: C
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/*
- * Copyright (C) 2011 Citrix Ltd.
- * Author Ian Jackson <ian.jackson@eu.citrix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- */
-
-#ifndef LIBXL_EVENT_H
-#define LIBXL_EVENT_H
-
-#include <libxl.h>
-#include <poll.h>
-#include <sys/time.h>
-
-/*======================================================================*/
-
-/*
- * Domain event handling - getting Xen events from libxl
- *
- * (Callers inside libxl may not call libxl_event_check or _wait.)
- */
-
-#define LIBXL_EVENTMASK_ALL (~(unsigned long)0)
-
-typedef int libxl_event_predicate(const libxl_event*, void *user);
- /* Return value is 0 if the event is unwanted or non-0 if it is.
- * Predicates are not allowed to fail.
- */
-
-int libxl_event_check(libxl_ctx *ctx, libxl_event **event_r,
- uint64_t typemask,
- libxl_event_predicate *predicate, void *predicate_user)
- LIBXL_EXTERNAL_CALLERS_ONLY;
- /* Searches for an event, already-happened, which matches typemask
- * and predicate. predicate==0 matches any event.
- * libxl_event_check returns the event, which must then later be
- * freed by the caller using libxl_event_free.
- *
- * Returns ERROR_NOT_READY if no such event has happened.
- */
-
-int libxl_event_wait(libxl_ctx *ctx, libxl_event **event_r,
- uint64_t typemask,
- libxl_event_predicate *predicate, void *predicate_user)
- LIBXL_EXTERNAL_CALLERS_ONLY;
- /* Like libxl_event_check but blocks if no suitable events are
- * available, until some are. Uses libxl_osevent_beforepoll/
- * _afterpoll so may be inefficient if very many domains are being
- * handled by a single program.
- */
-
-void libxl_event_free(libxl_ctx *ctx, libxl_event *event);
-
-
-/* Alternatively or additionally, the application may also use this: */
-
-typedef struct libxl_event_hooks {
- uint64_t event_occurs_mask;
- void (*event_occurs)(void *user,
-#ifndef LIBXL_HAVE_NONCONST_EVENT_OCCURS_EVENT_ARG
- const
-#endif
- libxl_event *event);
- void (*disaster)(void *user, libxl_event_type type,
- const char *msg, int errnoval);
-} libxl_event_hooks;
-
-void libxl_event_register_callbacks(libxl_ctx *ctx,
- const libxl_event_hooks *hooks, void *user);
- /*
- * Arranges that libxl will henceforth call event_occurs for any
- * events whose type is set in event_occurs_mask, rather than
- * queueing the event for retrieval by libxl_event_check/wait.
- * Events whose bit is clear in mask are not affected.
- *
- * event becomes owned by the application and must be freed, either
- * by event_occurs or later.
- *
- * event_occurs may be NULL if mask is 0.
- *
- * libxl_event_register_callback also provides a way for libxl to
- * report to the application that there was a problem reporting
- * events; this can occur due to lack of host memory during event
- * handling, or other wholly unrecoverable errors from system calls
- * made by libxl. This will not happen for frivolous reasons - only
- * if the system, or the Xen components of it, are badly broken.
- *
- * msg and errnoval will describe the action that libxl was trying
- * to do, and type specifies the type of libxl events which may be
- * missing. type may be 0 in which case events of all types may be
- * missing.
- *
- * disaster may be NULL. If it is, or if _register_callbacks has
- * not been called, errors of this kind are fatal to the entire
- * application: libxl will print messages to its logs and to stderr
- * and call exit(-1).
- *
- * If disaster returns, it may be the case that some or all future
- * libxl calls will return errors; likewise it may be the case that
- * no more events (of the specified type, if applicable) can be
- * produced. An application which supplies a disaster function
- * should normally react either by exiting, or by (when it has
- * returned to its main event loop) shutting down libxl with
- * libxl_ctx_free and perhaps trying to restart it with
- * libxl_ctx_init.
- *
- * In any case before calling disaster, libxl will have logged a
- * message with level XTL_CRITICAL.
- *
- * Reentrancy: it IS permitted to call libxl from within
- * event_occurs. It is NOT permitted to call libxl from within
- * disaster. The event_occurs and disaster callbacks may occur on
- * any thread in which the application calls libxl.
- *
- * libxl_event_register_callbacks may be called as many times, with
- * different parameters, as the application likes; the most recent
- * call determines the libxl behaviour. However it is NOT safe to
- * call _register_callbacks concurrently with, or reentrantly from,
- * any other libxl function, nor while any event-generation
- * facilities are enabled.
- */
-
-
-/*
- * Events are only generated if they have been requested.
- * The following functions request the generation of specific events.
- *
- * Each set of functions for controlling event generation has this form:
- *
- * typedef struct libxl__evgen_FOO libxl__evgen_FOO;
- * int libxl_evenable_FOO(libxl_ctx *ctx, FURTHER PARAMETERS,
- * libxl_ev_user user, libxl__evgen_FOO **evgen_out);
- * void libxl_evdisable_FOO(libxl_ctx *ctx, libxl__evgen_FOO *evgen);
- *
- * The evenable function arranges that the events (as described in the
- * doc comment for the individual function) will start to be generated
- * by libxl. On success, *evgen_out is set to a non-null pointer to
- * an opaque struct.
- *
- * The user value is returned in the generated events and may be
- * used by the caller for whatever it likes. The type ev_user is
- * guaranteed to be an unsigned integer type which is at least
- * as big as uint64_t and is also guaranteed to be big enough to
- * contain any intptr_t value.
- *
- * If it becomes desirable to stop generation of the relevant events,
- * or to reclaim the resources in libxl associated with the evgen
- * structure, the same evgen value should be passed to the evdisable
- * function. However, note that events which occurred prior to the
- * evdisable call may still be returned.
- *
- * The caller may enable identical events more than once. If they do
- * so, each actual occurrence will generate several events to be
- * returned by libxl_event_check, with the appropriate user value(s).
- * Aside from this, each occurrence of each event is returned by
- * libxl_event_check exactly once.
- *
- * An evgen is associated with the libxl_ctx used for its creation.
- * After libxl_ctx_free, all corresponding evgen handles become
- * invalid and must no longer be passed to evdisable.
- *
- * Applications should ensure that they eventually retrieve every
- * event using libxl_event_check or libxl_event_wait, since events
- * which occur but are not retrieved by the application will be queued
- * inside libxl indefinitely. libxl_event_check/_wait may be O(n)
- * where n is the number of queued events which do not match the
- * criteria specified in the arguments to check/wait.
- */
-
-typedef struct libxl__evgen_domain_death libxl_evgen_domain_death;
-int libxl_evenable_domain_death(libxl_ctx *ctx, uint32_t domid,
- libxl_ev_user, libxl_evgen_domain_death **evgen_out);
-void libxl_evdisable_domain_death(libxl_ctx *ctx, libxl_evgen_domain_death*);
- /* Arranges for the generation of DOMAIN_SHUTDOWN and DOMAIN_DEATH
- * events. A domain which is destroyed before it shuts down
- * may generate only a DEATH event.
- */
-
-typedef struct libxl__evgen_disk_eject libxl_evgen_disk_eject;
-int libxl_evenable_disk_eject(libxl_ctx *ctx, uint32_t domid, const char *vdev,
- libxl_ev_user, libxl_evgen_disk_eject **evgen_out);
-void libxl_evdisable_disk_eject(libxl_ctx *ctx, libxl_evgen_disk_eject*);
- /* Arranges for the generation of DISK_EJECT events. A copy of the
- * string *vdev will be made for libxl's internal use, and a pointer
- * to this (or some other) copy will be returned as the vdev
- * member of event.u.
- */
-
-
-/*======================================================================*/
-
-/*
- * OS event handling - passing low-level OS events to libxl
- *
- * Event-driven programs must use these facilities to allow libxl
- * to become aware of readability/writeability of file descriptors
- * and the occurrence of timeouts.
- *
- * There are two approaches available. The first is appropriate for
- * simple programs handling reasonably small numbers of domains:
- *
- * for (;;) {
- * libxl_osevent_beforepoll(...)
- * poll();
- * libxl_osevent_afterpoll(...);
- * for (;;) {
- * r = libxl_event_check(...);
- * if (r==ERROR_NOT_READY) break;
- * if (r) goto error_out;
- * do something with the event;
- * }
- * }
- *
- * The second approach uses libxl_osevent_register_hooks and is
- * suitable for programs which are already using a callback-based
- * event library.
- *
- * An application may freely mix the two styles of interaction.
- *
- * (Callers inside libxl may not call libxl_osevent_... functions.)
- */
-
-struct pollfd;
-
-/* The caller should provide beforepoll with some space for libxl's
- * fds, and tell libxl how much space is available by setting *nfds_io.
- * fds points to the start of this space (and fds may be a pointer into
- * a larger array, for example, if the application has some fds of
- * its own that it is interested in).
- *
- * On return *nfds_io will in any case have been updated by libxl
- * according to how many fds libxl wants to poll on.
- *
- * If the space was sufficient, libxl fills in fds[0..<new
- * *nfds_io>] suitably for poll(2), updates *timeout_upd if needed,
- * and returns ok.
- *
- * If space was insufficient, fds[0..<old *nfds_io>] is undefined on
- * return; *nfds_io on return will be greater than the value on
- * entry; *timeout_upd may or may not have been updated; and
- * libxl_osevent_beforepoll returns ERROR_BUFERFULL. In this case
- * the application needs to make more space (enough space for
- * *nfds_io struct pollfd) and then call beforepoll again, before
- * entering poll(2). Typically this will involve calling realloc.
- *
- * The application may call beforepoll with fds==NULL and
- * *nfds_io==0 in order to find out how much space is needed.
- *
- * *timeout_upd is as for poll(2): it's in milliseconds, and
- * negative values mean no timeout (infinity).
- * libxl_osevent_beforepoll will only reduce the timeout, naturally.
- */
-int libxl_osevent_beforepoll(libxl_ctx *ctx, int *nfds_io,
- struct pollfd *fds, int *timeout_upd,
- struct timeval now)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* nfds and fds[0..nfds] must be from the most recent call to
- * _beforepoll, as modified by poll. (It is therefore not possible
- * to have multiple threads simultaneously polling using this
- * interface.)
- *
- * This function actually performs all of the IO and other actions,
- * and generates events (libxl_event), which are implied by either
- * (a) the time of day or (b) both (i) the returned information from
- * _beforepoll, and (ii) the results from poll specified in
- * fds[0..nfds-1]. Generated events can then be retrieved by
- * libxl_event_check.
- */
-void libxl_osevent_afterpoll(libxl_ctx *ctx, int nfds, const struct pollfd *fds,
- struct timeval now)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-
-typedef struct libxl_osevent_hooks {
- int (*fd_register)(void *user, int fd, void **for_app_registration_out,
- short events, void *for_libxl);
- int (*fd_modify)(void *user, int fd, void **for_app_registration_update,
- short events);
- void (*fd_deregister)(void *user, int fd, void *for_app_registration);
- int (*timeout_register)(void *user, void **for_app_registration_out,
- struct timeval abs, void *for_libxl);
- int (*timeout_modify)(void *user, void **for_app_registration_update,
- struct timeval abs)
- /* only ever called with abs={0,0}, meaning ASAP */;
- void (*timeout_deregister)(void *user, void *for_app_registration)
- /* will never be called */;
-} libxl_osevent_hooks;
-
-/* The application which calls register_fd_hooks promises to
- * maintain a register of fds and timeouts that libxl is interested
- * in, and make calls into libxl (libxl_osevent_occurred_*)
- * when those fd events and timeouts occur. This is more efficient
- * than _beforepoll/_afterpoll if there are many fds (which can
- * happen if the same libxl application is managing many domains).
- *
- * For an fd event, events is as for poll(). register or modify may
- * be called with events==0, in which case it must still work
- * normally, just not generate any events.
- *
- * For a timeout event, milliseconds is as for poll().
- * Specifically, negative values of milliseconds mean NO TIMEOUT.
- * This is used by libxl to temporarily disable a timeout.
- *
- * If the register or modify hook succeeds it may update
- * *for_app_registration_out/_update and must then return 0.
- * On entry to register, *for_app_registration_out is always NULL.
- *
- * A registration or modification hook may fail, in which case it
- * must leave the registration state of the fd or timeout unchanged.
- * It may then either return ERROR_OSEVENT_REG_FAIL or any positive
- * int. The value returned will be passed up through libxl and
- * eventually returned back to the application. When register
- * fails, any value stored into *for_registration_out is ignored by
- * libxl; when modify fails, any changed value stored into
- * *for_registration_update is honoured by libxl and will be passed
- * to future modify or deregister calls.
- *
- * libxl may want to register more than one callback for any one fd;
- * in that case: (i) each such registration will have at least one bit
- * set in revents which is unique to that registration; (ii) if an
- * event occurs which is relevant for multiple registrations the
- * application's event system may call libxl_osevent_occurred_fd
- * for one, some, or all of those registrations.
- *
- * If fd_modify is used, it is permitted for the application's event
- * system to still make calls to libxl_osevent_occurred_fd for the
- * "old" set of requested events; these will be safely ignored by
- * libxl.
- *
- * libxl will remember the value stored in *for_app_registration_out
- * (or *for_app_registration_update) by a successful call to
- * register (or modify), and pass it to subsequent calls to modify
- * or deregister.
- *
- * Note that the application must cope with a call from libxl to
- * timeout_modify racing with its own call to
- * libxl__osevent_occurred_timeout. libxl guarantees that
- * timeout_modify will only be called with abs={0,0} but the
- * application must still ensure that libxl's attempt to cause the
- * timeout to occur immediately is safely ignored even the timeout is
- * actually already in the process of occurring.
- *
- * timeout_deregister is not used because it forms part of a
- * deprecated unsafe mode of use of the API.
- *
- * osevent_register_hooks may be called only once for each libxl_ctx.
- * libxl may make calls to register/modify/deregister from within
- * any libxl function (indeed, it will usually call register from
- * register_event_hooks). Conversely, the application MUST NOT make
- * the event occurrence calls (libxl_osevent_occurred_*) into libxl
- * reentrantly from within libxl (for example, from within the
- * register/modify functions).
- *
- * Lock hierarchy: the register/modify/deregister functions may be
- * called with locks held. These locks (the "libxl internal locks")
- * are inside the libxl_ctx. Therefore, if those register functions
- * acquire any locks of their own ("caller register locks") outside
- * libxl, to avoid deadlock one of the following must hold for each
- * such caller register lock:
- * (a) "acquire libxl internal locks before caller register lock":
- * No libxl function may be called with the caller register
- * lock held.
- * (b) "acquire caller register lock before libxl internal locks":
- * No libxl function may be called _without_ the caller
- * register lock held.
- * Of these we would normally recommend (a).
- *
- * The value *hooks is not copied and must outlast the libxl_ctx.
- */
-void libxl_osevent_register_hooks(libxl_ctx *ctx,
- const libxl_osevent_hooks *hooks,
- void *user);
-
-/* It is NOT legal to call _occurred_ reentrantly within any libxl
- * function. Specifically it is NOT legal to call it from within
- * a register callback. Conversely, libxl MAY call register/deregister
- * from within libxl_event_occurred_call_*.
- */
-
-void libxl_osevent_occurred_fd(libxl_ctx *ctx, void *for_libxl,
- int fd, short events, short revents)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/* Implicitly, on entry to this function the timeout has been
- * deregistered. If _occurred_timeout is called, libxl will not
- * call timeout_deregister; if it wants to requeue the timeout it
- * will call timeout_register again.
- */
-void libxl_osevent_occurred_timeout(libxl_ctx *ctx, void *for_libxl)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-
-/*======================================================================*/
-
-/*
- * Subprocess handling.
- *
- * Unfortunately the POSIX interface makes this very awkward.
- *
- * There are two possible arrangements for collecting statuses from
- * wait/waitpid.
- *
- * For naive programs:
- *
- * libxl will keep a SIGCHLD handler installed whenever it has an
- * active (unreaped) child. It will reap all children with
- * wait(); any children it does not recognise will be passed to
- * the application via an optional callback (and will result in
- * logged warnings if no callback is provided or the callback
- * denies responsibility for the child).
- *
- * libxl may have children whenever:
- *
- * - libxl is performing an operation which can be made
- * asynchronous; ie one taking a libxl_asyncop_how, even
- * if NULL is passed indicating that the operation is
- * synchronous; or
- *
- * - events of any kind are being generated, as requested
- * by libxl_evenable_....
- *
- * A multithreaded application which is naive in this sense may
- * block SIGCHLD on some of its threads, but there must be at
- * least one thread that has SIGCHLD unblocked. libxl will not
- * modify the blocking flag for SIGCHLD (except that it may create
- * internal service threads with all signals blocked).
- *
- * A naive program must only have at any one time only
- * one libxl context which might have children.
- *
- * For programs which run their own children alongside libxl's:
- *
- * A program which does this must call libxl_childproc_setmode.
- * There are three options:
- *
- * libxl_sigchld_owner_libxl:
- *
- * While any libxl operation which might use child processes
- * is running, works like libxl_sigchld_owner_libxl_always;
- * but, deinstalls the handler the rest of the time.
- *
- * In this mode, the application, while it uses any libxl
- * operation which might create or use child processes (see
- * above):
- * - Must not have any child processes running.
- * - Must not install a SIGCHLD handler.
- * - Must not reap any children.
- *
- * This is the default (i.e. if setmode is not called, or 0 is
- * passed for hooks).
- *
- * libxl_sigchld_owner_mainloop:
- *
- * The application must install a SIGCHLD handler and reap (at
- * least) all of libxl's children and pass their exit status to
- * libxl by calling libxl_childproc_exited. (If the application
- * has multiple libxl ctx's, it must call libxl_childproc_exited
- * on each ctx.)
- *
- * libxl_sigchld_owner_libxl_always:
- *
- * The application expects this libxl ctx to reap all of the
- * process's children, and provides a callback to be notified of
- * their exit statuses. The application must have only one
- * libxl_ctx configured this way.
- *
- * libxl_sigchld_owner_libxl_always_selective_reap:
- *
- * The application expects to reap all of its own children
- * synchronously, and does not use SIGCHLD. libxl is to install
- * a SIGCHLD handler. The application may have multiple
- * libxl_ctxs configured this way; in which case all of its ctxs
- * must be so configured.
- */
-
-
-typedef enum {
- /* libxl owns SIGCHLD whenever it has a child, and reaps
- * all children, including those not spawned by libxl. */
- libxl_sigchld_owner_libxl,
-
- /* Application promises to discover when SIGCHLD occurs and call
- * libxl_childproc_exited or libxl_childproc_sigchld_occurred (but
- * NOT from within a signal handler). libxl will not itself
- * arrange to (un)block or catch SIGCHLD. */
- libxl_sigchld_owner_mainloop,
-
- /* libxl owns SIGCHLD all the time, and the application is
- * relying on libxl's event loop for reaping its children too. */
- libxl_sigchld_owner_libxl_always,
-
- /* libxl owns SIGCHLD all the time, but it must only reap its own
- * children. The application will reap its own children
- * synchronously with waitpid, without the assistance of SIGCHLD. */
- libxl_sigchld_owner_libxl_always_selective_reap,
-} libxl_sigchld_owner;
-
-typedef struct {
- libxl_sigchld_owner chldowner;
-
- /* All of these are optional: */
-
- /* Called by libxl instead of fork. Should behave exactly like
- * fork, including setting errno etc. May NOT reenter into libxl.
- * Application may use this to discover pids of libxl's children,
- * for example.
- */
- pid_t (*fork_replacement)(void *user);
-
- /* With libxl_sigchld_owner_libxl, called by libxl when it has
- * reaped a pid. (Not permitted with _owner_mainloop.)
- *
- * Should return 0 if the child was recognised by the application
- * (or if the application does not keep those kind of records),
- * ERROR_UNKNOWN_CHILD if the application knows that the child is not
- * the application's; if it returns another error code it is a
- * disaster as described for libxl_event_register_callbacks.
- * (libxl will report unexpected children to its error log.)
- *
- * If not supplied, the application is assumed not to start
- * any children of its own.
- *
- * This function is NOT called from within the signal handler.
- * Rather it will be called from inside a libxl's event handling
- * code and thus only when libxl is running, for example from
- * within libxl_event_wait. (libxl uses the self-pipe trick
- * to implement this.)
- *
- * childproc_exited_callback may call back into libxl, but it
- * is best to avoid making long-running libxl calls as that might
- * stall the calling event loop while the nested operation
- * completes.
- */
- int (*reaped_callback)(pid_t, int status, void *user);
-} libxl_childproc_hooks;
-
-/* hooks may be 0 in which is equivalent to &{ libxl_sigchld_owner_libxl, 0, 0 }
- *
- * May not be called when libxl might have any child processes, or the
- * behaviour is undefined. So it is best to call this at
- * initialisation.
- *
- * The value *hooks is not copied and must outlast the libxl_ctx.
- */
-void libxl_childproc_setmode(libxl_ctx *ctx, const libxl_childproc_hooks *hooks,
- void *user);
-
-/*
- * This function is for an application which owns SIGCHLD and which
- * reaps all of the process's children, and dispatches the exit status
- * to the correct place inside the application.
- *
- * May be called only by an application which has called setmode with
- * chldowner == libxl_sigchld_owner_mainloop. If pid was a process started
- * by this instance of libxl, returns 0 after doing whatever
- * processing is appropriate. Otherwise silently returns
- * ERROR_UNKNOWN_CHILD. No other error returns are possible.
- *
- * May NOT be called from within a signal handler which might
- * interrupt any libxl operation. The application will almost
- * certainly need to use the self-pipe trick (or a working pselect or
- * ppoll) to implement this.
- */
-int libxl_childproc_reaped(libxl_ctx *ctx, pid_t, int status)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-/*
- * This function is for an application which owns SIGCHLD but which
- * doesn't keep track of all of its own children in a manner suitable
- * for reaping all of them and then dispatching them.
- *
- * Such an the application must notify libxl, by calling this
- * function, that a SIGCHLD occurred. libxl will then check all its
- * children, reap any that are ready, and take any action necessary -
- * but it will not reap anything else.
- *
- * May be called only by an application which has called setmode with
- * chldowner == libxl_sigchld_owner_mainloop.
- *
- * May NOT be called from within a signal handler which might
- * interrupt any libxl operation (just like libxl_childproc_reaped).
- */
-void libxl_childproc_sigchld_occurred(libxl_ctx *ctx)
- LIBXL_EXTERNAL_CALLERS_ONLY;
-
-
-/*
- * An application which initialises a libxl_ctx in a parent process
- * and then forks a child which does not quickly exec, must
- * instead libxl_postfork_child_noexec in the child. One call
- * on any existing (or specially made) ctx is sufficient; after
- * this all previously existing libxl_ctx's are invalidated and
- * must not be used - or even freed. It is harmless to call this
- * postfork function and then exec anyway.
- *
- * Until libxl_postfork_child_noexec has returned:
- * - No other libxl calls may be made.
- * - If any libxl ctx was configured handle the process's SIGCHLD,
- * the child may not create further (grand)child processes, nor
- * manipulate SIGCHLD.
- *
- * libxl_postfork_child_noexec may not reclaim all the resources
- * associated with the libxl ctx. This includes but is not limited
- * to: ordinary memory; files on disk and in /var/run; file
- * descriptors; memory mapped into the process from domains being
- * managed (grant maps); Xen event channels. Use of libxl in
- * processes which fork long-lived children is not recommended for
- * this reason. libxl_postfork_child_noexec is provided so that
- * an application can make further libxl calls in a child which
- * is going to exec or exit soon.
- */
-void libxl_postfork_child_noexec(libxl_ctx *ctx);
-
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/*
- * Copyright (C) 2011 Citrix Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- */
-
-#ifndef LIBXL_JSON_H
-#define LIBXL_JSON_H
-
-#include <yajl/yajl_gen.h>
-#include <yajl/yajl_parse.h>
-
-#ifdef HAVE_YAJL_YAJL_VERSION_H
-# include <yajl/yajl_version.h>
-#endif
-
-yajl_gen_status libxl__uint64_gen_json(yajl_gen hand, uint64_t val);
-yajl_gen_status libxl_defbool_gen_json(yajl_gen hand, libxl_defbool *p);
-yajl_gen_status libxl_uuid_gen_json(yajl_gen hand, libxl_uuid *p);
-yajl_gen_status libxl_mac_gen_json(yajl_gen hand, libxl_mac *p);
-yajl_gen_status libxl_bitmap_gen_json(yajl_gen hand, libxl_bitmap *p);
-yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
- libxl_cpuid_policy_list *p);
-yajl_gen_status libxl_string_list_gen_json(yajl_gen hand, libxl_string_list *p);
-yajl_gen_status libxl_key_value_list_gen_json(yajl_gen hand,
- libxl_key_value_list *p);
-yajl_gen_status libxl_hwcap_gen_json(yajl_gen hand, libxl_hwcap *p);
-yajl_gen_status libxl_ms_vm_genid_gen_json(yajl_gen hand, libxl_ms_vm_genid *p);
-
-#include <_libxl_types_json.h>
-
-/* YAJL version check */
-#if defined(YAJL_MAJOR) && (YAJL_MAJOR > 1)
-# define HAVE_YAJL_V2 1
-#endif
-
-#ifdef HAVE_YAJL_V2
-
-typedef size_t libxl_yajl_length;
-
-static inline yajl_handle libxl__yajl_alloc(const yajl_callbacks *callbacks,
- yajl_alloc_funcs *allocFuncs,
- void *ctx)
-{
- yajl_handle hand = yajl_alloc(callbacks, allocFuncs, ctx);
- if (hand)
- yajl_config(hand, yajl_allow_trailing_garbage, 1);
- return hand;
-}
-
-static inline yajl_gen libxl_yajl_gen_alloc(const yajl_alloc_funcs *allocFuncs)
-{
- yajl_gen g;
- g = yajl_gen_alloc(allocFuncs);
- if (g)
- yajl_gen_config(g, yajl_gen_beautify, 1);
- return g;
-}
-
-#else /* !HAVE_YAJL_V2 */
-
-#define yajl_complete_parse yajl_parse_complete
-
-typedef unsigned int libxl_yajl_length;
-
-static inline yajl_handle libxl__yajl_alloc(const yajl_callbacks *callbacks,
- const yajl_alloc_funcs *allocFuncs,
- void *ctx)
-{
- yajl_parser_config cfg = {
- .allowComments = 1,
- .checkUTF8 = 1,
- };
- return yajl_alloc(callbacks, &cfg, allocFuncs, ctx);
-}
-
-static inline yajl_gen libxl_yajl_gen_alloc(const yajl_alloc_funcs *allocFuncs)
-{
- yajl_gen_config conf = { 1, " " };
- return yajl_gen_alloc(&conf, allocFuncs);
-}
-
-#endif /* !HAVE_YAJL_V2 */
-
-yajl_gen_status libxl_domain_config_gen_json(yajl_gen hand,
- libxl_domain_config *p);
-
-#endif /* LIBXL_JSON_H */
+++ /dev/null
-/*
- * Copyright (C) 2009 Citrix Ltd.
- * Author Stefano Stabellini <stefano.stabellini@eu.citrix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- */
-
-#ifndef LIBXL_UTILS_H
-#define LIBXL_UTILS_H
-
-#include "libxl.h"
-
-#ifndef LIBXL_HAVE_NONCONST_LIBXL_BASENAME_RETURN_VALUE
-const
-#endif
-char *libxl_basename(const char *name); /* returns string from strdup */
-
-unsigned long libxl_get_required_shadow_memory(unsigned long maxmem_kb, unsigned int smp_cpus);
- /* deprecated; see LIBXL_HAVE_DOMAIN_NEED_MEMORY_CONFIG in libxl.h */
-int libxl_name_to_domid(libxl_ctx *ctx, const char *name, uint32_t *domid);
-int libxl_domain_qualifier_to_domid(libxl_ctx *ctx, const char *name, uint32_t *domid);
-char *libxl_domid_to_name(libxl_ctx *ctx, uint32_t domid);
-int libxl_cpupool_qualifier_to_cpupoolid(libxl_ctx *ctx, const char *p,
- uint32_t *poolid_r,
- int *was_name_r);
-int libxl_name_to_cpupoolid(libxl_ctx *ctx, const char *name, uint32_t *poolid);
-char *libxl_cpupoolid_to_name(libxl_ctx *ctx, uint32_t poolid);
-int libxl_cpupoolid_is_valid(libxl_ctx *ctx, uint32_t poolid);
-int libxl_get_stubdom_id(libxl_ctx *ctx, int guest_domid);
-int libxl_is_stubdom(libxl_ctx *ctx, uint32_t domid, uint32_t *target_domid);
-int libxl_create_logfile(libxl_ctx *ctx, const char *name, char **full_name);
-int libxl_string_to_backend(libxl_ctx *ctx, char *s, libxl_disk_backend *backend);
-
-int libxl_read_file_contents(libxl_ctx *ctx, const char *filename,
- void **data_r, int *datalen_r);
- /* Reads the contents of the plain file filename into a mallocd
- * buffer. Returns 0 or errno. Any errors other than ENOENT are logged.
- * If the file is empty, *data_r and *datalen_r are set to 0.
- * On error, *data_r and *datalen_r are unchanged.
- * data_r and/or datalen_r may be 0.
- */
-
-int libxl_read_exactly(libxl_ctx *ctx, int fd, void *data, ssize_t sz,
- const char *filename, const char *what);
-int libxl_write_exactly(libxl_ctx *ctx, int fd, const void *data,
- ssize_t sz, const char *filename, const char *what);
- /* Returns 0 or errno. If file is truncated on reading, returns
- * EPROTO and you have no way to tell how much was read. Errors are
- * logged using filename (which is only used for logging) and what
- * (which may be 0). */
-
-int libxl_pipe(libxl_ctx *ctx, int pipes[2]);
- /* Just like pipe(2), but log errors. */
-
-void libxl_report_child_exitstatus(libxl_ctx *ctx, xentoollog_level,
- const char *what, pid_t pid, int status);
- /* treats all exit statuses as errors; if that's not what you want,
- * check status yourself first */
-
-int libxl_mac_to_device_nic(libxl_ctx *ctx, uint32_t domid,
- const char *mac, libxl_device_nic *nic);
-int libxl_devid_to_device_nic(libxl_ctx *ctx, uint32_t domid, int devid,
- libxl_device_nic *nic);
-
-int libxl_vdev_to_device_disk(libxl_ctx *ctx, uint32_t domid, const char *vdev,
- libxl_device_disk *disk);
-
-int libxl_uuid_to_device_vtpm(libxl_ctx *ctx, uint32_t domid,
- libxl_uuid *uuid, libxl_device_vtpm *vtpm);
-int libxl_devid_to_device_vtpm(libxl_ctx *ctx, uint32_t domid,
- int devid, libxl_device_vtpm *vtpm);
-int libxl_devid_to_device_usbctrl(libxl_ctx *ctx, uint32_t domid,
- int devid, libxl_device_usbctrl *usbctrl);
-
-int libxl_devid_to_device_vkb(libxl_ctx *ctx, uint32_t domid,
- int devid, libxl_device_vkb *vkb);
-
-int libxl_devid_to_device_vdispl(libxl_ctx *ctx, uint32_t domid,
- int devid, libxl_device_vdispl *vdispl);
-
-int libxl_devid_to_device_vsnd(libxl_ctx *ctx, uint32_t domid,
- int devid, libxl_device_vsnd *vsnd);
-
-int libxl_ctrlport_to_device_usbdev(libxl_ctx *ctx, uint32_t domid,
- int ctrl, int port,
- libxl_device_usbdev *usbdev);
-
-int libxl_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *bitmap, int n_bits);
- /* Allocated bimap is from malloc, libxl_bitmap_dispose() to be
- * called by the application when done. */
-void libxl_bitmap_copy_alloc(libxl_ctx *ctx, libxl_bitmap *dptr,
- const libxl_bitmap *sptr);
-void libxl_bitmap_copy(libxl_ctx *ctx, libxl_bitmap *dptr,
- const libxl_bitmap *sptr);
-int libxl_bitmap_is_full(const libxl_bitmap *bitmap);
-int libxl_bitmap_is_empty(const libxl_bitmap *bitmap);
-int libxl_bitmap_test(const libxl_bitmap *bitmap, int bit);
-void libxl_bitmap_set(libxl_bitmap *bitmap, int bit);
-void libxl_bitmap_reset(libxl_bitmap *bitmap, int bit);
-int libxl_bitmap_count_set(const libxl_bitmap *bitmap);
-int libxl_bitmap_or(libxl_ctx *ctx, libxl_bitmap *or_map,
- const libxl_bitmap *map1,
- const libxl_bitmap *map2);
-int libxl_bitmap_and(libxl_ctx *ctx, libxl_bitmap *and_map,
- const libxl_bitmap *map1,
- const libxl_bitmap *map2);
-char *libxl_bitmap_to_hex_string(libxl_ctx *ctx, const libxl_bitmap *bitmap);
-static inline void libxl_bitmap_set_any(libxl_bitmap *bitmap)
-{
- memset(bitmap->map, -1, bitmap->size);
-}
-static inline void libxl_bitmap_set_none(libxl_bitmap *bitmap)
-{
- memset(bitmap->map, 0, bitmap->size);
-}
-static inline int libxl_bitmap_cpu_valid(libxl_bitmap *bitmap, int bit)
-{
- return bit >= 0 && bit < (bitmap->size * 8);
-}
-#define libxl_for_each_bit(var, map) for (var = 0; var < (map).size * 8; var++)
-#define libxl_for_each_set_bit(v, m) for (v = 0; v < (m).size * 8; v++) \
- if (libxl_bitmap_test(&(m), v))
-
-/*
- * Compares two bitmaps bit by bit, up to nr_bits or, if nr_bits is 0, up
- * to the size of the largest bitmap. If sizes does not match, bits past the
- * of a bitmap are considered as being 0, which matches with the semantic and
- * implementation of libxl_bitmap_test I think().
- *
- * So, basically, [0,1,0] and [0,1] are considered equal, while [0,1,1] and
- * [0,1] are different.
- */
-static inline int libxl_bitmap_equal(const libxl_bitmap *ba,
- const libxl_bitmap *bb,
- int nr_bits)
-{
- int i;
-
- if (nr_bits == 0)
- nr_bits = ba->size > bb->size ? ba->size * 8 : bb->size * 8;
-
- for (i = 0; i < nr_bits; i++) {
- if (libxl_bitmap_test(ba, i) != libxl_bitmap_test(bb, i))
- return 0;
- }
- return 1;
-}
-
-int libxl_cpu_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *cpumap, int max_cpus);
-int libxl_node_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *nodemap,
- int max_nodes);
-int libxl_socket_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *socketmap,
- int max_sockets);
-/* Fill socketmap with the CPU topology information on the system. */
-int libxl_get_online_socketmap(libxl_ctx *ctx, libxl_bitmap *socketmap);
-
-/* Populate cpumap with the cpus spanned by the nodes in nodemap */
-int libxl_nodemap_to_cpumap(libxl_ctx *ctx,
- const libxl_bitmap *nodemap,
- libxl_bitmap *cpumap);
-/* Populate cpumap with the cpus spanned by node */
-int libxl_node_to_cpumap(libxl_ctx *ctx, int node,
- libxl_bitmap *cpumap);
-/* Populate nodemap with the nodes of the cpus in cpumap */
-int libxl_cpumap_to_nodemap(libxl_ctx *ctx,
- const libxl_bitmap *cpumap,
- libxl_bitmap *nodemap);
-
- static inline uint32_t libxl__sizekb_to_mb(uint32_t s) {
- return (s + 1023) / 1024;
-}
-
-void libxl_string_copy(libxl_ctx *ctx, char **dst, char * const*src);
-
-
-#define LIBXL_FILLZERO(object) (memset(&(object), 0, sizeof((object))))
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/*
- * Copyright (C) 2008,2010 Citrix Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- */
-
-#ifndef __LIBXL_UUID_H__
-#define __LIBXL_UUID_H__
-
-#define LIBXL_UUID_FMT "%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"
-#define LIBXL_UUID_FMTLEN ((2*16)+4) /* 16 hex bytes plus 4 hypens */
-#define LIBXL__UUID_BYTES(uuid) uuid[0], uuid[1], uuid[2], uuid[3], \
- uuid[4], uuid[5], uuid[6], uuid[7], \
- uuid[8], uuid[9], uuid[10], uuid[11], \
- uuid[12], uuid[13], uuid[14], uuid[15]
-#define LIBXL_UUID_BYTES(arg) LIBXL__UUID_BYTES((arg).uuid)
-
-typedef struct {
- /* UUID as an octet stream in big-endian byte-order. */
- unsigned char uuid[16];
-} libxl_uuid;
-
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040700
-#if defined(__linux__)
-
-#include <uuid/uuid.h>
-#include <stdint.h>
-
-#elif defined(__FreeBSD__) || defined(__NetBSD__)
-
-#include <uuid.h>
-#include <stdint.h>
-#include <string.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <assert.h>
-
-#else
-
-#error "Please update libxl_uuid.h for your OS"
-
-#endif
-#endif
-
-int libxl_uuid_is_nil(const libxl_uuid *uuid);
-void libxl_uuid_generate(libxl_uuid *uuid);
-int libxl_uuid_from_string(libxl_uuid *uuid, const char *in);
-void libxl_uuid_copy(libxl_ctx *ctx_opt, libxl_uuid *dst,
- const libxl_uuid *src);
-#if defined(LIBXL_API_VERSION) && LIBXL_API_VERSION < 0x040500
-static inline void libxl_uuid_copy_0x040400(libxl_uuid *dst,
- const libxl_uuid *src)
-{
- libxl_uuid_copy(NULL, dst, src);
-}
-#define libxl_uuid_copy libxl_uuid_copy_0x040400
-#endif
-
-void libxl_uuid_clear(libxl_uuid *uuid);
-int libxl_uuid_compare(const libxl_uuid *uuid1, const libxl_uuid *uuid2);
-const uint8_t *libxl_uuid_bytearray_const(const libxl_uuid *uuid);
-uint8_t *libxl_uuid_bytearray(libxl_uuid *uuid);
-
-#endif /* __LIBXL_UUID_H__ */
-
-/*
- * Local variables:
- * mode: C
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
include $(XEN_ROOT)/tools/libs/libs.mk
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenstat)/include
-
$(LIB_OBJS): _paths.h
PYLIB=bindings/swig/python/_xenstat.so
+++ /dev/null
-/* libxenstat: statistics-collection library for Xen
- * Copyright (C) International Business Machines Corp., 2005
- * Authors: Josh Triplett <josh@kernel.org>
- * Judy Fischbach <jfisch@cs.pdx.edu>
- * David Hendricks <cro_marmot@comcast.net>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- */
-
-/* libxenstat API */
-
-#ifndef XENSTAT_H
-#define XENSTAT_H
-
-#include <stdbool.h>
-
-/* Opaque handles */
-typedef struct xenstat_handle xenstat_handle;
-typedef struct xenstat_domain xenstat_domain;
-typedef struct xenstat_node xenstat_node;
-typedef struct xenstat_vcpu xenstat_vcpu;
-typedef struct xenstat_network xenstat_network;
-typedef struct xenstat_vbd xenstat_vbd;
-
-/* Initialize the xenstat library. Returns a handle to be used with
- * subsequent calls to the xenstat library, or NULL if an error occurs. */
-xenstat_handle *xenstat_init(void);
-
-/* Release the handle to libxc, free resources, etc. */
-void xenstat_uninit(xenstat_handle * handle);
-
-/* Flags for types of information to collect in xenstat_get_node */
-#define XENSTAT_VCPU 0x1
-#define XENSTAT_NETWORK 0x2
-#define XENSTAT_XEN_VERSION 0x4
-#define XENSTAT_VBD 0x8
-#define XENSTAT_ALL (XENSTAT_VCPU|XENSTAT_NETWORK|XENSTAT_XEN_VERSION|XENSTAT_VBD)
-
-/* Get all available information about a node */
-xenstat_node *xenstat_get_node(xenstat_handle * handle, unsigned int flags);
-
-/* Free the information */
-void xenstat_free_node(xenstat_node * node);
-
-/*
- * Node functions - extract information from a xenstat_node
- */
-
-/* Get information about the domain with the given domain ID */
-xenstat_domain *xenstat_node_domain(xenstat_node * node,
- unsigned int domid);
-
-/* Get the domain with the given index; used to loop over all domains. */
-xenstat_domain *xenstat_node_domain_by_index(xenstat_node * node,
- unsigned index);
-
-/* Get xen version of the node */
-const char *xenstat_node_xen_version(xenstat_node * node);
-
-/* Get amount of total memory on a node */
-unsigned long long xenstat_node_tot_mem(xenstat_node * node);
-
-/* Get amount of free memory on a node */
-unsigned long long xenstat_node_free_mem(xenstat_node * node);
-
-/* Get amount of freeable memory on a node */
-long xenstat_node_freeable_mb(xenstat_node * node);
-
-/* Find the number of domains existing on a node */
-unsigned int xenstat_node_num_domains(xenstat_node * node);
-
-/* Find the number of CPUs existing on a node */
-unsigned int xenstat_node_num_cpus(xenstat_node * node);
-
-/* Get information about the CPU speed */
-unsigned long long xenstat_node_cpu_hz(xenstat_node * node);
-
-/*
- * Domain functions - extract information from a xenstat_domain
- */
-
-/* Get the domain ID for this domain */
-unsigned xenstat_domain_id(xenstat_domain * domain);
-
-/* Set the domain name for the domain */
-char *xenstat_domain_name(xenstat_domain * domain);
-
-/* Get information about how much CPU time has been used */
-unsigned long long xenstat_domain_cpu_ns(xenstat_domain * domain);
-
-/* Find the number of VCPUs allocated to a domain */
-unsigned int xenstat_domain_num_vcpus(xenstat_domain * domain);
-
-/* Get the VCPU handle to obtain VCPU stats */
-xenstat_vcpu *xenstat_domain_vcpu(xenstat_domain * domain,
- unsigned int vcpu);
-
-/* Find the current memory reservation for this domain */
-unsigned long long xenstat_domain_cur_mem(xenstat_domain * domain);
-
-/* Find the maximum memory reservation for this domain */
-unsigned long long xenstat_domain_max_mem(xenstat_domain * domain);
-
-/* Find the domain's SSID */
-unsigned int xenstat_domain_ssid(xenstat_domain * domain);
-
-/* Get domain states */
-unsigned int xenstat_domain_dying(xenstat_domain * domain);
-unsigned int xenstat_domain_crashed(xenstat_domain * domain);
-unsigned int xenstat_domain_shutdown(xenstat_domain * domain);
-unsigned int xenstat_domain_paused(xenstat_domain * domain);
-unsigned int xenstat_domain_blocked(xenstat_domain * domain);
-unsigned int xenstat_domain_running(xenstat_domain * domain);
-
-/* Get the number of networks for a given domain */
-unsigned int xenstat_domain_num_networks(xenstat_domain *);
-
-/* Get the network handle to obtain network stats */
-xenstat_network *xenstat_domain_network(xenstat_domain * domain,
- unsigned int network);
-
-/* Get the number of VBDs for a given domain */
-unsigned int xenstat_domain_num_vbds(xenstat_domain *);
-
-/* Get the VBD handle to obtain VBD stats */
-xenstat_vbd *xenstat_domain_vbd(xenstat_domain * domain,
- unsigned int vbd);
-
-/*
- * VCPU functions - extract information from a xenstat_vcpu
- */
-
-/* Get VCPU usage */
-unsigned int xenstat_vcpu_online(xenstat_vcpu * vcpu);
-unsigned long long xenstat_vcpu_ns(xenstat_vcpu * vcpu);
-
-
-/*
- * Network functions - extract information from a xenstat_network
- */
-
-/* Get the ID for this network */
-unsigned int xenstat_network_id(xenstat_network * network);
-
-/* Get the number of receive bytes for this network */
-unsigned long long xenstat_network_rbytes(xenstat_network * network);
-
-/* Get the number of receive packets for this network */
-unsigned long long xenstat_network_rpackets(xenstat_network * network);
-
-/* Get the number of receive errors for this network */
-unsigned long long xenstat_network_rerrs(xenstat_network * network);
-
-/* Get the number of receive drops for this network */
-unsigned long long xenstat_network_rdrop(xenstat_network * network);
-
-/* Get the number of transmit bytes for this network */
-unsigned long long xenstat_network_tbytes(xenstat_network * network);
-
-/* Get the number of transmit packets for this network */
-unsigned long long xenstat_network_tpackets(xenstat_network * network);
-
-/* Get the number of transmit errors for this network */
-unsigned long long xenstat_network_terrs(xenstat_network * network);
-
-/* Get the number of transmit drops for this network */
-unsigned long long xenstat_network_tdrop(xenstat_network * network);
-
-/*
- * VBD functions - extract information from a xen_vbd
- */
-
-/* Get the back driver type for Virtual Block Device */
-unsigned int xenstat_vbd_type(xenstat_vbd * vbd);
-
-/* Get the device number for Virtual Block Device */
-unsigned int xenstat_vbd_dev(xenstat_vbd * vbd);
-
-/* Get the number of OO/RD/WR requests for vbd */
-unsigned long long xenstat_vbd_oo_reqs(xenstat_vbd * vbd);
-unsigned long long xenstat_vbd_rd_reqs(xenstat_vbd * vbd);
-unsigned long long xenstat_vbd_wr_reqs(xenstat_vbd * vbd);
-unsigned long long xenstat_vbd_rd_sects(xenstat_vbd * vbd);
-unsigned long long xenstat_vbd_wr_sects(xenstat_vbd * vbd);
-
-/* Returns error while getting stats (1 if error happened, 0 otherwise) */
-bool xenstat_vbd_error(xenstat_vbd * vbd);
-
-#endif /* XENSTAT_H */
CFLAGS += -DXEN_LIB_STORED="\"$(XEN_LIB_STORED)\""
CFLAGS += -DXEN_RUN_STORED="\"$(XEN_RUN_STORED)\""
-LINK_FILES = xs_lib.c include/xenstore_lib.h list.h utils.h
+LINK_FILES = xs_lib.c list.h utils.h
$(LIB_OBJS): $(LINK_FILES)
$(LINK_FILES):
- ln -sf $(XEN_ROOT)/tools/xenstore/$(notdir $@) $@
+ ln -sf $(XEN_ROOT)/tools/xenstore/$@ $@
xs.opic: CFLAGS += -DUSE_PTHREAD
ifeq ($(CONFIG_Linux),y)
PKG_CONFIG_REMOVE += -ldl
endif
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenstore)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
-
.PHONY: install
install: install-headers
install-headers:
$(INSTALL_DIR) $(DESTDIR)$(includedir)
$(INSTALL_DIR) $(DESTDIR)$(includedir)/xenstore-compat
- $(INSTALL_DATA) include/compat/xs.h $(DESTDIR)$(includedir)/xenstore-compat/xs.h
- $(INSTALL_DATA) include/compat/xs_lib.h $(DESTDIR)$(includedir)/xenstore-compat/xs_lib.h
+ $(INSTALL_DATA) $(XEN_INCLUDE)/xenstore-compat/xs.h $(DESTDIR)$(includedir)/xenstore-compat/xs.h
+ $(INSTALL_DATA) $(XEN_INCLUDE)/xenstore-compat/xs_lib.h $(DESTDIR)$(includedir)/xenstore-compat/xs_lib.h
ln -sf xenstore-compat/xs.h $(DESTDIR)$(includedir)/xs.h
ln -sf xenstore-compat/xs_lib.h $(DESTDIR)$(includedir)/xs_lib.h
+++ /dev/null
-#warning xs.h is deprecated use xenstore.h instead
-#include <xenstore.h>
+++ /dev/null
-#warning xs_lib.h is deprecated use xenstore_lib.h instead
-#include <xenstore_lib.h>
+++ /dev/null
-/*
- Xen Store Daemon providing simple tree-like database.
- Copyright (C) 2005 Rusty Russell IBM Corporation
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef XENSTORE_H
-#define XENSTORE_H
-
-#include <xenstore_lib.h>
-
-#define XBT_NULL 0
-
-/* Following open flags are deprecated and ignored! */
-#define XS_OPEN_READONLY (1UL<<0)
-#define XS_OPEN_SOCKETONLY (1UL<<1)
-
-/*
- * Setting XS_UNWATCH_FILTER arranges that after xs_unwatch, no
- * related watch events will be delivered via xs_read_watch. But
- * this relies on the couple token, subpath is unique.
- *
- * XS_UNWATCH_FILTER clear XS_UNWATCH_FILTER set
- *
- * Even after xs_unwatch, "stale" After xs_unwatch returns, no
- * instances of the watch event watch events with the same
- * may be delivered. token and with the same subpath
- * will be delivered.
- *
- * A path and a subpath can be The application must avoid
- * register with the same token. registering a path (/foo/) and
- * a subpath (/foo/bar) with the
- * same path until a successful
- * xs_unwatch for the first watch
- * has returned.
- */
-#define XS_UNWATCH_FILTER (1UL<<2)
-
-struct xs_handle;
-typedef uint32_t xs_transaction_t;
-
-/* IMPORTANT: For details on xenstore protocol limits, see
- * docs/misc/xenstore.txt in the Xen public source repository, and use the
- * XENSTORE_*_MAX limit macros defined in xen/io/xs_wire.h.
- */
-
-/* On failure, these routines set errno. */
-
-/* Open a connection to the xs daemon.
- * Attempts to make a connection over the socket interface,
- * and if it fails, then over the xenbus interface.
- *
- * * Connections made with xs_open(0) (which might be shared page or
- * socket based) are only guaranteed to work in the parent after
- * fork.
- * * xs_daemon_open*() and xs_domain_open() are deprecated synonyms
- * for xs_open(0).
- *
- * Returns a handle or NULL.
- */
-struct xs_handle *xs_open(unsigned long flags);
-
-/* Close the connection to the xs daemon. */
-void xs_close(struct xs_handle *xsh /* NULL ok */);
-
-/* Connect to the xs daemon.
- * Returns a handle or NULL.
- * Deprecated, please use xs_open(0) instead
- */
-struct xs_handle *xs_daemon_open(void);
-struct xs_handle *xs_domain_open(void);
-struct xs_handle *xs_daemon_open_readonly(void);
-
-/* Close the connection to the xs daemon.
- * Deprecated, please use xs_close() instead
- */
-void xs_daemon_close(struct xs_handle *);
-
-/* Throw away the connection to the xs daemon, for use after fork(). */
-void xs_daemon_destroy_postfork(struct xs_handle *);
-
-/* Get contents of a directory.
- * Returns a malloced array: call free() on it after use.
- * Num indicates size.
- * Returns NULL on failure.
- */
-char **xs_directory(struct xs_handle *h, xs_transaction_t t,
- const char *path, unsigned int *num);
-
-/* Get the value of a single file, nul terminated.
- * Returns a malloced value: call free() on it after use.
- * len indicates length in bytes, not including terminator.
- * Returns NULL on failure.
- */
-void *xs_read(struct xs_handle *h, xs_transaction_t t,
- const char *path, unsigned int *len);
-
-/* Write the value of a single file.
- * Returns false on failure.
- */
-bool xs_write(struct xs_handle *h, xs_transaction_t t,
- const char *path, const void *data, unsigned int len);
-
-/* Create a new directory.
- * Returns false on failure, or success if it already exists.
- */
-bool xs_mkdir(struct xs_handle *h, xs_transaction_t t,
- const char *path);
-
-/* Destroy a file or directory (and children).
- * Returns false on failure, or if it doesn't exist.
- */
-bool xs_rm(struct xs_handle *h, xs_transaction_t t,
- const char *path);
-
-/* Fake function which will always return false (required to let
- * libxenstore remain at 3.0 version.
- */
-bool xs_restrict(struct xs_handle *h, unsigned domid);
-
-/* Get permissions of node (first element is owner, first perms is "other").
- * Returns malloced array, or NULL: call free() after use.
- */
-struct xs_permissions *xs_get_permissions(struct xs_handle *h,
- xs_transaction_t t,
- const char *path, unsigned int *num);
-
-/* Set permissions of node (must be owner). Returns false on failure.
- *
- * Domain 0 may read / write anywhere in the store, regardless of
- * permission settings.
- *
- * Note:
- * The perms array is a list of (domid, permissions) pairs. The first
- * element in the list specifies the owner of the list, plus the flags
- * for every domain not explicitly specified subsequently. The
- * subsequent entries are normal capabilities.
- *
- * Example C code:
- *
- * struct xs_permissions perms[2];
- *
- * perms[0].id = dm_domid;
- * perms[0].perms = XS_PERM_NONE;
- * perms[1].id = guest_domid;
- * perms[1].perms = XS_PERM_READ;
- *
- * It means the owner of the path is domain $dm_domid (hence it always
- * has read and write permission), all other domains (unless specified
- * in subsequent pair) can neither read from nor write to that
- * path. It then specifies domain $guest_domid can read from that
- * path.
- */
-bool xs_set_permissions(struct xs_handle *h, xs_transaction_t t,
- const char *path, struct xs_permissions *perms,
- unsigned int num_perms);
-
-/* Watch a node for changes (poll on fd to detect, or call read_watch()).
- * When the node (or any child) changes, fd will become readable.
- * Token is returned when watch is read, to allow matching.
- * Returns false on failure.
- */
-bool xs_watch(struct xs_handle *h, const char *path, const char *token);
-
-/* Return the FD to poll on to see if a watch has fired. */
-int xs_fileno(struct xs_handle *h);
-
-/* Check for node changes. On success, returns a non-NULL pointer ret
- * such that ret[0] and ret[1] are valid C strings, namely the
- * triggering path (see docs/misc/xenstore.txt) and the token (from
- * xs_watch). On error return value is NULL setting errno.
- *
- * Callers should, after xs_fileno has become readable, repeatedly
- * call xs_check_watch until it returns NULL and sets errno to EAGAIN.
- * (If the fd became readable, xs_check_watch is allowed to make it no
- * longer show up as readable even if future calls to xs_check_watch
- * will return more watch events.)
- *
- * After the caller is finished with the returned information it
- * should be freed all in one go with free(ret).
- */
-char **xs_check_watch(struct xs_handle *h);
-
-/* Find out what node change was on (will block if nothing pending).
- * Returns array containing the path and token, or NULL.
- * Use XS_WATCH_* to access these elements.
- * Call free() after use.
- */
-char **xs_read_watch(struct xs_handle *h, unsigned int *num);
-
-/* Remove a watch on a node: implicitly acks any outstanding watch.
- * Returns false on failure (no watch on that node).
- */
-bool xs_unwatch(struct xs_handle *h, const char *path, const char *token);
-
-/* Start a transaction: changes by others will not be seen during this
- * transaction, and changes will not be visible to others until end.
- * Returns NULL on failure.
- */
-xs_transaction_t xs_transaction_start(struct xs_handle *h);
-
-/* End a transaction.
- * If abandon is true, transaction is discarded instead of committed.
- * Returns false on failure: if errno == EAGAIN, you have to restart
- * transaction.
- */
-bool xs_transaction_end(struct xs_handle *h, xs_transaction_t t,
- bool abort);
-
-/* Introduce a new domain.
- * This tells the store daemon about a shared memory page, event channel and
- * store path associated with a domain: the domain uses these to communicate.
- */
-bool xs_introduce_domain(struct xs_handle *h,
- unsigned int domid,
- unsigned long mfn,
- unsigned int eventchn);
-
-/* Set the target of a domain
- * This tells the store daemon that a domain is targetting another one, so
- * it should let it tinker with it.
- */
-bool xs_set_target(struct xs_handle *h,
- unsigned int domid,
- unsigned int target);
-
-/* Resume a domain.
- * Clear the shutdown flag for this domain in the store.
- */
-bool xs_resume_domain(struct xs_handle *h, unsigned int domid);
-
-/* Release a domain.
- * Tells the store domain to release the memory page to the domain.
- */
-bool xs_release_domain(struct xs_handle *h, unsigned int domid);
-
-/* Query the home path of a domain. Call free() after use.
- */
-char *xs_get_domain_path(struct xs_handle *h, unsigned int domid);
-
-/* Returns true if child is either equal to parent, or a node underneath
- * parent; or false otherwise. Done by string comparison, so relative and
- * absolute pathnames never in a parent/child relationship by this
- * definition. Cannot fail.
- */
-bool xs_path_is_subpath(const char *parent, const char *child);
-
-/* Return whether the domain specified has been introduced to xenstored.
- */
-bool xs_is_domain_introduced(struct xs_handle *h, unsigned int domid);
-
-char *xs_control_command(struct xs_handle *h, const char *cmd,
- void *data, unsigned int len);
-/* Deprecated: use xs_control_command() instead. */
-char *xs_debug_command(struct xs_handle *h, const char *cmd,
- void *data, unsigned int len);
-
-int xs_suspend_evtchn_port(int domid);
-#endif /* XENSTORE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "linux"
- * indent-tabs-mode: t
- * c-basic-offset: 8
- * tab-width: 8
- * End:
- */
MAJOR = 1
MINOR = 0
-AUTOINCS := include/_xentoolcore_list.h
+AUTOINCS := $(XEN_INCLUDE)/_xentoolcore_list.h
SRCS-y += handlereg.c
include $(XEN_ROOT)/tools/libs/libs.mk
PKG_CONFIG_DESC := Central support for Xen Hypervisor userland libraries
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxentoolcore)/include
$(LIB_OBJS): $(AUTOINCS)
$(PIC_OBJS): $(AUTOINCS)
-include/_xentoolcore_list.h: $(XEN_INCLUDE)/xen-external/bsd-sys-queue-h-seddery $(XEN_INCLUDE)/xen-external/bsd-sys-queue.h
- $(PERL) $^ --prefix=xentoolcore >$@.new
- $(call move-if-changed,$@.new,$@)
+$(XEN_INCLUDE)/_xentoolcore_list.h: $(XEN_INCLUDE)/xen-external/bsd-sys-queue-h-seddery $(XEN_INCLUDE)/xen-external/bsd-sys-queue.h
+ $(PERL) $^ --prefix=xentoolcore >$(notdir $@).new
+ $(call move-if-changed,$(notdir $@).new,$@)
+++ /dev/null
-/*
- * xentoolcore.h
- *
- * Copyright (c) 2017 Citrix
- *
- * Common features used/provided by all Xen tools libraries
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef XENTOOLCORE_H
-#define XENTOOLCORE_H
-
-#include <stdint.h>
-#include <xen/xen.h>
-
-/*
- * int xentoolcore_restrict_all(domid_t domid);
- *
- * Arranges that Xen library handles (fds etc.) which are currently held
- * by Xen libraries, can no longer be used other than to affect domid.
- *
- * Does not prevent effects that amount only to
- * - denial of service, possibly host-wide, by resource exhaustion etc.
- *
- * If this cannot be achieved, returns -1 and sets errno.
- * If called again with the same domid, it may succeed, or it may
- * fail (even though such a call is potentially meaningful).
- * (If called again with a different domid, it will necessarily fail.)
- *
- * Note for multi-threaded programs: If xentoolcore_restrict_all is
- * called concurrently with a function which /or closes Xen library
- * handles (e.g. libxl_ctx_free, xs_close), the restriction is only
- * guaranteed to be effective after all of the closing functions have
- * returned, even if that is later than the return from
- * xentoolcore_restrict_all. (Of course if xentoolcore_restrict_all
- * it is called concurrently with opening functions, the new handles
- * might or might not be restricted.)
- *
- * ====================================================================
- * IMPORTANT - IMPLEMENTATION STATUS
- *
- * This function has been implemented insofar as it appears necessary
- * for the purposes of running a deprivileged qemu, and is believed to
- * be sufficient (subject to the caveats discussed in the appropriate
- * libxl documentation for this feature).
- *
- * However, this function is NOT implemented for all Xen libraries.
- * For each use case of this function, the designer must evaluate and
- * audit whether the implementation is sufficient in their specific
- * context.
- *
- * Of course, patches to extend the implementation are very welcome.
- * ====================================================================
- *
- * Thread safe.
- *
- * We expect that no callers do the following:
- * - in one thread call xen_somelibrary_open|close
- * - in another thread call fork
- * - in the child of the fork, before exec, call
- * xen_some[other]library_open|close or xentoolcore_restrict_all
- *
- */
-int xentoolcore_restrict_all(domid_t domid);
-
-#endif /* XENTOOLCORE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
+++ /dev/null
-/*
- * xentoolcore_internal.h
- *
- * Interfaces of xentoolcore directed internally at other Xen libraries
- *
- * Copyright (c) 2017 Citrix
- *
- * Common code used by all Xen tools libraries
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef XENTOOLCORE_INTERNAL_H
-#define XENTOOLCORE_INTERNAL_H
-
-#include <stddef.h>
-
-#include "xentoolcore.h"
-#include "_xentoolcore_list.h"
-
-/*---------- active handle registration ----------*/
-
-/*
- * This is all to support xentoolcore_restrict_all
- *
- * Any libxl library that opens a Xen control handle of any kind which
- * might allow manipulation of dom0, of other domains, or of the whole
- * machine, must:
- * I. arrange that their own datastructure contains a
- * Xentoolcore__Active_Handle
- *
- * II. during the "open handle" function
- * 1. allocate the memory for the own datastructure and initialise it
- * 2. set Xentoolcore__Active_Handle.restrict_callback
- * 3. call xentoolcore__register_active_handle
- * 3a. if the open fails, call xentoolcore__deregister_active_handle
- * 4. ONLY THEN actually open the relevant fd or whatever
- *
- * III. during the "close handle" function
- * 1. FIRST call xentoolcore__deregister_active_handle
- * 2. close the relevant fd or whatever
- *
- * [ III(b). Do the same as III for error exit from the open function. ]
- *
- * IV. in the restrict_callback function
- * * Arrange that the fd (or other handle) can no longer by used
- * other than with respect to domain domid.
- * * Future attempts to manipulate other domains (or the whole
- * host) via this handle must cause an error return (and
- * perhaps a log message), not a crash
- * * If selective restriction is not possible, the handle must
- * be completely invalidated so that it is not useable;
- * subsequent manipulations may not crash
- * * The restrict_callback function should not normally fail
- * if this can be easily avoided - it is better to make the
- * handle nonfunction instead.
- * * NB that restrict_callback might be called again. That must
- * work properly: if the domid is the same, it is idempotent.
- * If the domid is different. then either the handle must be
- * completely invalidated, or restrict_callback must fail.)
- *
- * Thread safety:
- * xentoolcore__[de]register_active_handle are threadsafe
- * but MUST NOT be called within restrict_callback
- *
- * Fork safety:
- * Libraries which use these functions do not on that account
- * need to take any special care over forks occurring in
- * other threads, provided that they obey the rules above.
- */
-
-typedef struct Xentoolcore__Active_Handle Xentoolcore__Active_Handle;
-
-typedef int Xentoolcore__Restrict_Callback(Xentoolcore__Active_Handle*,
- domid_t domid);
-
-struct Xentoolcore__Active_Handle {
- Xentoolcore__Restrict_Callback *restrict_callback;
- XENTOOLCORE_LIST_ENTRY(Xentoolcore__Active_Handle) entry;
-};
-
-void xentoolcore__register_active_handle(Xentoolcore__Active_Handle*);
-void xentoolcore__deregister_active_handle(Xentoolcore__Active_Handle*);
-
-/*
- * Utility function for use in restrict_callback in libraries whose
- * handles don't have a useful restrict function. We neuter the fd by
- * dup'ing /dev/null onto it. This is better than closing it, because
- * it does not involve locking against concurrent uses of in other
- * threads.
- *
- * Returns the value that restrict_callback should return.
- * fd may be < 0.
- */
-int xentoolcore__restrict_by_dup2_null(int fd);
-
-/* ---------- convenient stuff ---------- */
-
-/*
- * This does not appear in xentoolcore.h because it is a bit
- * namespace-unclean.
- */
-
-/*
- * Convenience macros.
- */
-
-/*
- * CONTAINER_OF work like this. Given:
- * typedef struct {
- * ...
- * member_type member_name;
- * ...
- * } outer_type;
- * outer_type outer, *outer_var;
- * member_type *inner_ptr = &outer->member_name;
- *
- * Then, effectively:
- * outer_type *CONTAINER_OF(member_type *inner_ptr,
- * *outer_var, // or type name for outer_type
- * member_name);
- *
- * So that:
- * CONTAINER_OF(inner_ptr, *outer_var, member_name) == &outer
- * CONTAINER_OF(inner_ptr, outer_type, member_name) == &outer
- */
-#define CONTAINER_OF(inner_ptr, outer, member_name) \
- ({ \
- typeof(outer) *container_of_; \
- container_of_ = (void*)((char*)(inner_ptr) - \
- offsetof(typeof(outer), member_name)); \
- (void)(&container_of_->member_name == \
- (typeof(inner_ptr))0) /* type check */; \
- container_of_; \
- })
-
-#endif /* XENTOOLCORE_INTERNAL_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
SRCS-y += xtl_logger_stdio.c
include $(XEN_ROOT)/tools/libs/libs.mk
-
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxentoollog)/include
+++ /dev/null
-/*
- * xentoollog.h
- *
- * Copyright (c) 2010 Citrix
- * Part of a generic logging interface used by various dom0 userland libraries.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef XENTOOLLOG_H
-#define XENTOOLLOG_H
-
-#include <stdio.h>
-#include <stdarg.h>
-
-
-/*---------- common declarations and types ----------*/
-
-typedef enum xentoollog_level {
- XTL_NONE, /* sentinel etc, never used for logging */
- XTL_DEBUG,
- XTL_VERBOSE,
- XTL_DETAIL,
- XTL_PROGRESS, /* also used for "progress" messages */
- XTL_INFO,
- XTL_NOTICE,
- XTL_WARN,
- XTL_ERROR,
- XTL_CRITICAL,
- XTL_NUM_LEVELS
-} xentoollog_level;
-
-typedef struct xentoollog_logger xentoollog_logger;
-struct xentoollog_logger {
- void (*vmessage)(struct xentoollog_logger *logger,
- xentoollog_level level,
- int errnoval /* or -1 */,
- const char *context /* eg "xc", "xl", may be 0 */,
- const char *format /* without level, context, \n */,
- va_list al)
- __attribute__((format(printf,5,0)));
- void (*progress)(struct xentoollog_logger *logger,
- const char *context /* see above */,
- const char *doing_what /* no \r,\n */,
- int percent, unsigned long done, unsigned long total)
- /* null function pointer is ok.
- * will always be called with done==0 for each new
- * context/doing_what */;
- void (*destroy)(struct xentoollog_logger *logger);
- /* each logger can put its necessary data here */
-};
-
-
-/*---------- facilities for consuming log messages ----------*/
-
-#define XTL_STDIOSTREAM_SHOW_PID 001u
-#define XTL_STDIOSTREAM_SHOW_DATE 002u
-#define XTL_STDIOSTREAM_HIDE_PROGRESS 004u
-#define XTL_STDIOSTREAM_PROGRESS_USE_CR 010u /* default is to */
-#define XTL_STDIOSTREAM_PROGRESS_NO_CR 020u /* use \r to ttys */
-
-typedef struct xentoollog_logger_stdiostream xentoollog_logger_stdiostream;
-
-xentoollog_logger_stdiostream *xtl_createlogger_stdiostream
- (FILE *f, xentoollog_level min_level, unsigned flags);
- /* may return 0 if malloc fails, in which case error was logged */
- /* destroy on this logger does not close the file */
-
-void xtl_stdiostream_set_minlevel(xentoollog_logger_stdiostream*,
- xentoollog_level min_level);
-void xtl_stdiostream_adjust_flags(xentoollog_logger_stdiostream*,
- unsigned set_flags, unsigned clear_flags);
- /* if set_flags and clear_flags overlap, set_flags takes precedence */
-
-void xtl_logger_destroy(struct xentoollog_logger *logger /* 0 is ok */);
-
-
-/*---------- facilities for generating log messages ----------*/
-
-void xtl_logv(struct xentoollog_logger *logger,
- xentoollog_level level,
- int errnoval /* or -1 */,
- const char *context /* eg "xc", "xenstore", "xl", may be 0 */,
- const char *format /* does not contain \n */,
- va_list) __attribute__((format(printf,5,0)));
-
-void xtl_log(struct xentoollog_logger *logger,
- xentoollog_level level,
- int errnoval /* or -1 */,
- const char *context /* eg "xc", "xenstore", "xl" */,
- const char *format /* does not contain \n */,
- ...) __attribute__((format(printf,5,6)));
-
-void xtl_progress(struct xentoollog_logger *logger,
- const char *context /* see above, may be 0 */,
- const char *doing_what,
- unsigned long done, unsigned long total);
-
-
-/*---------- facilities for defining log message consumers ----------*/
-
-const char *xtl_level_to_string(xentoollog_level); /* never fails */
-
-
-#define XTL_NEW_LOGGER(LOGGER,buffer) ({ \
- xentoollog_logger_##LOGGER *new_consumer; \
- \
- (buffer).vtable.vmessage = LOGGER##_vmessage; \
- (buffer).vtable.progress = LOGGER##_progress; \
- (buffer).vtable.destroy = LOGGER##_destroy; \
- \
- new_consumer = malloc(sizeof(*new_consumer)); \
- if (!new_consumer) { \
- xtl_log((xentoollog_logger*)&buffer, \
- XTL_CRITICAL, errno, "xtl", \
- "failed to allocate memory for new message logger"); \
- } else { \
- *new_consumer = buffer; \
- } \
- \
- new_consumer; \
-});
-
-
-#endif /* XENTOOLLOG_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
include $(XEN_ROOT)/tools/libs/libs.mk
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenutil)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
-
$(LIB_OBJS) $(PIC_OBJS): $(AUTOINCS) _paths.h
%.c %.h:: %.y
+++ /dev/null
-/*
- * Copyright (C) 2010 Citrix Ltd.
- * Author Ian Jackson <ian.jackson@eu.citrix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- */
-
-#ifndef LIBXLUTIL_H
-#define LIBXLUTIL_H
-
-#include <stdio.h>
-
-#include "libxl.h"
-
-enum XLU_ConfigValueType {
- XLU_STRING,
- XLU_LIST,
-};
-
-enum XLU_Operation {
- XLU_OP_ASSIGNMENT = 0,
- XLU_OP_ADDITION,
-};
-
-/* Unless otherwise stated, all functions return an errno value. */
-typedef struct XLU_Config XLU_Config;
-typedef struct XLU_ConfigList XLU_ConfigList;
-typedef struct XLU_ConfigValue XLU_ConfigValue;
-
-XLU_Config *xlu_cfg_init(FILE *report, const char *report_filename);
- /* 0 means we got ENOMEM. */
- /* report_filename is copied; report is saved and must remain valid
- * until the Config is destroyed. */
-
-int xlu_cfg_readfile(XLU_Config*, const char *real_filename);
-int xlu_cfg_readdata(XLU_Config*, const char *data, int length);
- /* If these fail, then it is undefined behaviour to call xlu_cfg_get_...
- * functions. You have to just xlu_cfg_destroy. */
-
-void xlu_cfg_destroy(XLU_Config*);
-
-
-/* All of the following print warnings to "report" if there is a problem.
- * Return values are:
- * 0 OK
- * ESRCH not defined
- * EINVAL value found but wrong format for request (prints warning unless dont_warn=true)
- * ERANGE value out of range (from strtol)
- */
-
-int xlu_cfg_get_string(const XLU_Config*, const char *n, const char **value_r,
- int dont_warn);
-/* free/strdup version */
-int xlu_cfg_replace_string(const XLU_Config *cfg, const char *n,
- char **value_r, int dont_warn);
-int xlu_cfg_get_long(const XLU_Config*, const char *n, long *value_r,
- int dont_warn);
-int xlu_cfg_get_bounded_long(const XLU_Config*, const char *n, long min,
- long max, long *value_r, int dont_warn);
-int xlu_cfg_get_defbool(const XLU_Config*, const char *n, libxl_defbool *b,
- int dont_warn);
-
-int xlu_cfg_get_list(const XLU_Config*, const char *n,
- XLU_ConfigList **list_r /* may be 0 */,
- int *entries_r /* may be 0 */,
- int dont_warn);
- /* there is no need to free *list_r; lifetime is that of the XLU_Config */
-int xlu_cfg_get_list_as_string_list(const XLU_Config *cfg, const char *n,
- libxl_string_list *sl, int dont_warn);
-const char *xlu_cfg_get_listitem(const XLU_ConfigList*, int entry);
- /* xlu_cfg_get_listitem cannot fail, except that if entry is
- * out of range it returns 0 (not setting errno) */
-
-enum XLU_ConfigValueType xlu_cfg_value_type(const XLU_ConfigValue *value);
-int xlu_cfg_value_get_string(const XLU_Config *cfg, XLU_ConfigValue *value,
- char **value_r, int dont_warn);
-int xlu_cfg_value_get_list(const XLU_Config *cfg, XLU_ConfigValue *value,
- XLU_ConfigList **value_r, int dont_warn);
-XLU_ConfigValue *xlu_cfg_get_listitem2(const XLU_ConfigList *list,
- int entry);
-
-/*
- * Disk specification parsing.
- */
-
-int xlu_disk_parse(XLU_Config *cfg, int nspecs, const char *const *specs,
- libxl_device_disk *disk);
- /* disk must have been initialised.
- *
- * On error, returns errno value. Bad strings cause EINVAL and
- * print a message to cfg's report (that's all cfg is used for).
- *
- * Normally one would pass nspecs==1 and only specs[0]. But it is
- * permitted to pass more strings in which case each is parsed as a
- * string containing a collection of parameters (but they all refer
- * to of the configuration for a single disk).
- *
- * nspecs==0 is permitted but since it does not specify some mandatory
- * properties, it produces a run-time configuration error if the
- * resulting disk struct is used with libxl.
- */
-
-/*
- * PCI specification parsing
- */
-int xlu_pci_parse_bdf(XLU_Config *cfg, libxl_device_pci *pcidev, const char *str);
-
-/*
- * RDM parsing
- */
-int xlu_rdm_parse(XLU_Config *cfg, libxl_rdm_reserve *rdm, const char *str);
-
-/*
- * Vif rate parsing.
- */
-
-int xlu_vif_parse_rate(XLU_Config *cfg, const char *rate,
- libxl_device_nic *nic);
-
-#endif /* LIBXLUTIL_H */
-
-/*
- * Local variables:
- * mode: C
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
include $(XEN_ROOT)/tools/libs/libs.mk
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenvchan)/include
-$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
-
clean: cleanlocal
.PHONY: cleanlocal
+++ /dev/null
-/**
- * @file
- * @section AUTHORS
- *
- * Copyright (C) 2010 Rafal Wojtczuk <rafal@invisiblethingslab.com>
- *
- * Authors:
- * Rafal Wojtczuk <rafal@invisiblethingslab.com>
- * Daniel De Graaf <dgdegra@tycho.nsa.gov>
- *
- * @section LICENSE
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * @section DESCRIPTION
- *
- * Originally borrowed from the Qubes OS Project, http://www.qubes-os.org,
- * this code has been substantially rewritten to use the gntdev and gntalloc
- * devices instead of raw MFNs and map_foreign_range.
- *
- * This is a library for inter-domain communication. A standard Xen ring
- * buffer is used, with a datagram-based interface built on top. The grant
- * reference and event channels are shared in XenStore under the path
- * /local/domain/<srv-id>/data/vchan/<cli-id>/<port>/{ring-ref,event-channel}
- *
- * The ring.h macros define an asymmetric interface to a shared data structure
- * that assumes all rings reside in a single contiguous memory space. This is
- * not suitable for vchan because the interface to the ring is symmetric except
- * for the setup. Unlike the producer-consumer rings defined in ring.h, the
- * size of the rings used in vchan are determined at execution time instead of
- * compile time, so the macros in ring.h cannot be used to access the rings.
- */
-
-#include <xen/io/libxenvchan.h>
-#include <xen/xen.h>
-#include <xen/sys/evtchn.h>
-#include <xenevtchn.h>
-#include <xengnttab.h>
-
-/* Callers who don't care don't need to #include <xentoollog.h> */
-struct xentoollog_logger;
-
-struct libxenvchan_ring {
- /* Pointer into the shared page. Offsets into buffer. */
- struct ring_shared* shr;
- /* ring data; may be its own shared page(s) depending on order */
- void* buffer;
- /**
- * The size of the ring is (1 << order); offsets wrap around when they
- * exceed this. This copy is required because we can't trust the order
- * in the shared page to remain constant.
- */
- int order;
-};
-
-/**
- * struct libxenvchan: control structure passed to all library calls
- */
-struct libxenvchan {
- /* Mapping handle for shared ring page */
- union {
- xengntshr_handle *gntshr; /* for server */
- xengnttab_handle *gnttab; /* for client */
- };
- /* Pointer to shared ring page */
- struct vchan_interface *ring;
- /* event channel interface */
- xenevtchn_handle *event;
- uint32_t event_port;
- /* informative flags: are we acting as server? */
- int is_server:1;
- /* true if server remains active when client closes (allows reconnection) */
- int server_persist:1;
- /* true if operations should block instead of returning 0 */
- int blocking:1;
- /* communication rings */
- struct libxenvchan_ring read, write;
-};
-
-/**
- * Set up a vchan, including granting pages
- * @param logger Logger for libxc errors
- * @param domain The peer domain that will be connecting
- * @param xs_path Base xenstore path for storing ring/event data
- * @param send_min The minimum size (in bytes) of the send ring (left)
- * @param recv_min The minimum size (in bytes) of the receive ring (right)
- * @return The structure, or NULL in case of an error
- */
-struct libxenvchan *libxenvchan_server_init(struct xentoollog_logger *logger,
- int domain, const char* xs_path,
- size_t read_min, size_t write_min);
-/**
- * Connect to an existing vchan. Note: you can reconnect to an existing vchan
- * safely, however no locking is performed, so you must prevent multiple clients
- * from connecting to a single server.
- *
- * @param logger Logger for libxc errors
- * @param domain The peer domain to connect to
- * @param xs_path Base xenstore path for storing ring/event data
- * @return The structure, or NULL in case of an error
- */
-struct libxenvchan *libxenvchan_client_init(struct xentoollog_logger *logger,
- int domain, const char* xs_path);
-/**
- * Close a vchan. This deallocates the vchan and attempts to free its
- * resources. The other side is notified of the close, but can still read any
- * data pending prior to the close.
- */
-void libxenvchan_close(struct libxenvchan *ctrl);
-
-/**
- * Packet-based receive: always reads exactly $size bytes.
- * @param ctrl The vchan control structure
- * @param data Buffer for data that was read
- * @param size Size of the buffer and amount of data to read
- * @return -1 on error, 0 if nonblocking and insufficient data is available, or $size
- */
-int libxenvchan_recv(struct libxenvchan *ctrl, void *data, size_t size);
-/**
- * Stream-based receive: reads as much data as possible.
- * @param ctrl The vchan control structure
- * @param data Buffer for data that was read
- * @param size Size of the buffer
- * @return -1 on error, otherwise the amount of data read (which may be zero if
- * the vchan is nonblocking)
- */
-int libxenvchan_read(struct libxenvchan *ctrl, void *data, size_t size);
-/**
- * Packet-based send: send entire buffer if possible
- * @param ctrl The vchan control structure
- * @param data Buffer for data to send
- * @param size Size of the buffer and amount of data to send
- * @return -1 on error, 0 if nonblocking and insufficient space is available, or $size
- */
-int libxenvchan_send(struct libxenvchan *ctrl, const void *data, size_t size);
-/**
- * Stream-based send: send as much data as possible.
- * @param ctrl The vchan control structure
- * @param data Buffer for data to send
- * @param size Size of the buffer
- * @return -1 on error, otherwise the amount of data sent (which may be zero if
- * the vchan is nonblocking)
- */
-int libxenvchan_write(struct libxenvchan *ctrl, const void *data, size_t size);
-/**
- * Waits for reads or writes to unblock, or for a close
- */
-int libxenvchan_wait(struct libxenvchan *ctrl);
-/**
- * Returns the event file descriptor for this vchan. When this FD is readable,
- * libxenvchan_wait() will not block, and the state of the vchan has changed since
- * the last invocation of libxenvchan_wait().
- */
-int libxenvchan_fd_for_select(struct libxenvchan *ctrl);
-/**
- * Query the state of the vchan shared page:
- * return 0 when one side has called libxenvchan_close() or crashed
- * return 1 when both sides are open
- * return 2 [server only] when no client has yet connected
- */
-int libxenvchan_is_open(struct libxenvchan* ctrl);
-/** Amount of data ready to read, in bytes */
-int libxenvchan_data_ready(struct libxenvchan *ctrl);
-/** Amount of data it is possible to send without blocking */
-int libxenvchan_buffer_space(struct libxenvchan *ctrl);
libs: $(LIBS)
-_xtl_levels.ml.in _xtl_levels.mli.in _xtl_levels.inc: genlevels.py $(XEN_ROOT)/tools/libs/toollog/include/xentoollog.h
+_xtl_levels.ml.in _xtl_levels.mli.in _xtl_levels.inc: genlevels.py $(XEN_INCLUDE)/xentoollog.h
$(PYTHON) genlevels.py _xtl_levels.mli.in _xtl_levels.ml.in _xtl_levels.inc
.PHONY: install
from functools import reduce
def read_levels():
- f = open('../../../libs/toollog/include/xentoollog.h', 'r')
+ f = open('../../../include/xentoollog.h', 'r')
levels = []
record = False
+++ /dev/null
-/*
- Common routines between Xen store user library and daemon.
- Copyright (C) 2005 Rusty Russell IBM Corporation
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef XENSTORE_LIB_H
-#define XENSTORE_LIB_H
-
-#include <stddef.h>
-#include <stdbool.h>
-#include <limits.h>
-#include <errno.h>
-#include <stdint.h>
-#include <xen/io/xs_wire.h>
-
-/* Bitmask of permissions. */
-enum xs_perm_type {
- XS_PERM_NONE = 0,
- XS_PERM_READ = 1,
- XS_PERM_WRITE = 2,
- /* Internal use. */
- XS_PERM_ENOENT_OK = 4,
- XS_PERM_OWNER = 8,
-};
-
-struct xs_permissions
-{
- unsigned int id;
- enum xs_perm_type perms;
-};
-
-/* Header of the node record in tdb. */
-struct xs_tdb_record_hdr {
- uint64_t generation;
- uint32_t num_perms;
- uint32_t datalen;
- uint32_t childlen;
- struct xs_permissions perms[0];
-};
-
-/* Each 10 bits takes ~ 3 digits, plus one, plus one for nul terminator. */
-#define MAX_STRLEN(x) ((sizeof(x) * CHAR_BIT + CHAR_BIT-1) / 10 * 3 + 2)
-
-/* Path for various daemon things: env vars can override. */
-const char *xs_daemon_rootdir(void);
-const char *xs_daemon_rundir(void);
-const char *xs_daemon_socket(void);
-const char *xs_daemon_socket_ro(void);
-const char *xs_domain_dev(void);
-const char *xs_daemon_tdb(void);
-
-/* Simple write function: loops for you. */
-bool xs_write_all(int fd, const void *data, unsigned int len);
-
-/* Convert strings to permissions. False if a problem. */
-bool xs_strings_to_perms(struct xs_permissions *perms, unsigned int num,
- const char *strings);
-
-/* Convert permissions to a string (up to len MAX_STRLEN(unsigned int)+1). */
-bool xs_perm_to_string(const struct xs_permissions *perm,
- char *buffer, size_t buf_len);
-
-/* Given a string and a length, count how many strings (nul terms). */
-unsigned int xs_count_strings(const char *strings, unsigned int len);
-
-/* Sanitising (quoting) possibly-binary strings. */
-struct expanding_buffer {
- char *buf;
- int avail;
-};
-
-/* Ensure that given expanding buffer has at least min_avail characters. */
-char *expanding_buffer_ensure(struct expanding_buffer *, int min_avail);
-
-/* sanitise_value() may return NULL if malloc fails. */
-char *sanitise_value(struct expanding_buffer *, const char *val, unsigned len);
-
-/* *out_len_r on entry is ignored; out must be at least strlen(in)+1 bytes. */
-void unsanitise_value(char *out, unsigned *out_len_r, const char *in);
-
-#endif /* XENSTORE_LIB_H */