xenbits.xensource.com Git - people/jgross/xen.git/commitdiff
tools: move libxenctrl below tools/libs
author Juergen Gross <jgross@suse.com>
Sun, 23 Aug 2020 08:00:15 +0000 (10:00 +0200)
committer Juergen Gross <jgross@suse.com>
Fri, 28 Aug 2020 15:00:47 +0000 (17:00 +0200)
Today tools/libxc needs to be built after tools/libs, as libxenctrl
depends on some libraries in tools/libs. This in turn blocks moving
other libraries depending on libxenctrl below tools/libs.

So carve out libxenctrl from tools/libxc and move it into
tools/libs/ctrl.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Samuel Thibault <samuel.thibault@ens-lyon.org> (stubdom parts)
Acked-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com> (python parts)
Acked-by: Wei Liu <wl@xen.org>
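
As an illustration only (not part of the commit itself): the move changes where the sources live, while the public libxenctrl API is carried over unchanged, so a minimal consumer keeps building exactly as before:

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    /* NULL loggers: messages go to the default destinations (stderr / log file). */
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);

    if ( !xch )
    {
        perror("xc_interface_open");
        return 1;
    }

    /* ... issue control operations through xch ... */

    xc_interface_close(xch);
    return 0;
}
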
109 files changed:
.gitignore
MAINTAINERS
stubdom/Makefile
stubdom/grub/Makefile
stubdom/mini-os.mk
tools/Rules.mk
tools/libs/Makefile
tools/libs/ctrl/Makefile [new file with mode: 0644]
tools/libs/ctrl/include/xenctrl.h [new file with mode: 0644]
tools/libs/ctrl/include/xenctrl_compat.h [new file with mode: 0644]
tools/libs/ctrl/include/xenctrl_dom.h [new file with mode: 0644]
tools/libs/ctrl/xc_altp2m.c [new file with mode: 0644]
tools/libs/ctrl/xc_arinc653.c [new file with mode: 0644]
tools/libs/ctrl/xc_bitops.h [new file with mode: 0644]
tools/libs/ctrl/xc_core.c [new file with mode: 0644]
tools/libs/ctrl/xc_core.h [new file with mode: 0644]
tools/libs/ctrl/xc_core_arm.c [new file with mode: 0644]
tools/libs/ctrl/xc_core_arm.h [new file with mode: 0644]
tools/libs/ctrl/xc_core_x86.c [new file with mode: 0644]
tools/libs/ctrl/xc_core_x86.h [new file with mode: 0644]
tools/libs/ctrl/xc_cpu_hotplug.c [new file with mode: 0644]
tools/libs/ctrl/xc_cpupool.c [new file with mode: 0644]
tools/libs/ctrl/xc_csched.c [new file with mode: 0644]
tools/libs/ctrl/xc_csched2.c [new file with mode: 0644]
tools/libs/ctrl/xc_devicemodel_compat.c [new file with mode: 0644]
tools/libs/ctrl/xc_domain.c [new file with mode: 0644]
tools/libs/ctrl/xc_evtchn.c [new file with mode: 0644]
tools/libs/ctrl/xc_evtchn_compat.c [new file with mode: 0644]
tools/libs/ctrl/xc_flask.c [new file with mode: 0644]
tools/libs/ctrl/xc_foreign_memory.c [new file with mode: 0644]
tools/libs/ctrl/xc_freebsd.c [new file with mode: 0644]
tools/libs/ctrl/xc_gnttab.c [new file with mode: 0644]
tools/libs/ctrl/xc_gnttab_compat.c [new file with mode: 0644]
tools/libs/ctrl/xc_hcall_buf.c [new file with mode: 0644]
tools/libs/ctrl/xc_kexec.c [new file with mode: 0644]
tools/libs/ctrl/xc_linux.c [new file with mode: 0644]
tools/libs/ctrl/xc_mem_access.c [new file with mode: 0644]
tools/libs/ctrl/xc_mem_paging.c [new file with mode: 0644]
tools/libs/ctrl/xc_memshr.c [new file with mode: 0644]
tools/libs/ctrl/xc_minios.c [new file with mode: 0644]
tools/libs/ctrl/xc_misc.c [new file with mode: 0644]
tools/libs/ctrl/xc_monitor.c [new file with mode: 0644]
tools/libs/ctrl/xc_msr_x86.h [new file with mode: 0644]
tools/libs/ctrl/xc_netbsd.c [new file with mode: 0644]
tools/libs/ctrl/xc_pagetab.c [new file with mode: 0644]
tools/libs/ctrl/xc_physdev.c [new file with mode: 0644]
tools/libs/ctrl/xc_pm.c [new file with mode: 0644]
tools/libs/ctrl/xc_private.c [new file with mode: 0644]
tools/libs/ctrl/xc_private.h [new file with mode: 0644]
tools/libs/ctrl/xc_psr.c [new file with mode: 0644]
tools/libs/ctrl/xc_resource.c [new file with mode: 0644]
tools/libs/ctrl/xc_resume.c [new file with mode: 0644]
tools/libs/ctrl/xc_rt.c [new file with mode: 0644]
tools/libs/ctrl/xc_solaris.c [new file with mode: 0644]
tools/libs/ctrl/xc_tbuf.c [new file with mode: 0644]
tools/libs/ctrl/xc_vm_event.c [new file with mode: 0644]
tools/libs/uselibs.mk
tools/libxc/Makefile
tools/libxc/include/xenctrl.h [deleted file]
tools/libxc/include/xenctrl_compat.h [deleted file]
tools/libxc/include/xenctrl_dom.h [deleted file]
tools/libxc/xc_altp2m.c [deleted file]
tools/libxc/xc_arinc653.c [deleted file]
tools/libxc/xc_bitops.h [deleted file]
tools/libxc/xc_core.c [deleted file]
tools/libxc/xc_core.h [deleted file]
tools/libxc/xc_core_arm.c [deleted file]
tools/libxc/xc_core_arm.h [deleted file]
tools/libxc/xc_core_x86.c [deleted file]
tools/libxc/xc_core_x86.h [deleted file]
tools/libxc/xc_cpu_hotplug.c [deleted file]
tools/libxc/xc_cpupool.c [deleted file]
tools/libxc/xc_csched.c [deleted file]
tools/libxc/xc_csched2.c [deleted file]
tools/libxc/xc_devicemodel_compat.c [deleted file]
tools/libxc/xc_domain.c [deleted file]
tools/libxc/xc_evtchn.c [deleted file]
tools/libxc/xc_evtchn_compat.c [deleted file]
tools/libxc/xc_flask.c [deleted file]
tools/libxc/xc_foreign_memory.c [deleted file]
tools/libxc/xc_freebsd.c [deleted file]
tools/libxc/xc_gnttab.c [deleted file]
tools/libxc/xc_gnttab_compat.c [deleted file]
tools/libxc/xc_hcall_buf.c [deleted file]
tools/libxc/xc_kexec.c [deleted file]
tools/libxc/xc_linux.c [deleted file]
tools/libxc/xc_mem_access.c [deleted file]
tools/libxc/xc_mem_paging.c [deleted file]
tools/libxc/xc_memshr.c [deleted file]
tools/libxc/xc_minios.c [deleted file]
tools/libxc/xc_misc.c [deleted file]
tools/libxc/xc_monitor.c [deleted file]
tools/libxc/xc_msr_x86.h [deleted file]
tools/libxc/xc_netbsd.c [deleted file]
tools/libxc/xc_pagetab.c [deleted file]
tools/libxc/xc_physdev.c [deleted file]
tools/libxc/xc_pm.c [deleted file]
tools/libxc/xc_private.c [deleted file]
tools/libxc/xc_private.h [deleted file]
tools/libxc/xc_psr.c [deleted file]
tools/libxc/xc_resource.c [deleted file]
tools/libxc/xc_resume.c [deleted file]
tools/libxc/xc_rt.c [deleted file]
tools/libxc/xc_solaris.c [deleted file]
tools/libxc/xc_tbuf.c [deleted file]
tools/libxc/xc_vm_event.c [deleted file]
tools/ocaml/xenstored/Makefile
tools/python/Makefile
tools/python/setup.py

index 823f4743dc75993d7bb55c8c699833cfd80af44c..d22b031ed2e01053b80d02ee1cda2eb56c3003bc 100644 (file)
@@ -114,6 +114,9 @@ tools/libs/hypfs/headers.chk
 tools/libs/hypfs/xenhypfs.pc
 tools/libs/call/headers.chk
 tools/libs/call/xencall.pc
+tools/libs/ctrl/_*.[ch]
+tools/libs/ctrl/libxenctrl.map
+tools/libs/ctrl/xencontrol.pc
 tools/libs/foreignmemory/headers.chk
 tools/libs/foreignmemory/xenforeignmemory.pc
 tools/libs/devicemodel/headers.chk
@@ -195,6 +198,11 @@ tools/include/xen-foreign/*.(c|h|size)
 tools/include/xen-foreign/checker
 tools/libvchan/xenvchan.pc
 tools/libxc/*.pc
+tools/libxc/xc_bitops.h
+tools/libxc/xc_core.h
+tools/libxc/xc_core_arm.h
+tools/libxc/xc_core_x86.h
+tools/libxc/xc_private.h
 tools/libxl/_libxl.api-for-check
 tools/libxl/*.api-ok
 tools/libxl/*.pc
index ffe2310294b45411e2a16193d1a3be6a2cef33b9..26c5382075092f863e61ab8152d70de0316ac62b 100644 (file)
@@ -226,7 +226,7 @@ M:  Stewart Hildebrand <stewart.hildebrand@dornerworks.com>
 S:     Supported
 L:     xen-devel@dornerworks.com
 F:     xen/common/sched/arinc653.c
-F:     tools/libxc/xc_arinc653.c
+F:     tools/libs/ctrl/xc_arinc653.c
 
 ARM (W/ VIRTUALISATION EXTENSIONS) ARCHITECTURE
 M:     Stefano Stabellini <sstabellini@kernel.org>
index c466858db0f1fe43192bb59dced99e34249b4781..961a9f87048ffabb27f4798380c7c3162699f764 100644 (file)
@@ -331,7 +331,7 @@ endif
 # libraries under tools/libs
 #######
 
-STUB_LIBS := toolcore toollog evtchn gnttab call foreignmemory devicemodel
+STUB_LIBS := toolcore toollog evtchn gnttab call foreignmemory devicemodel ctrl
 
 #######
 # common handling
@@ -396,12 +396,11 @@ $(TARGETS_MINIOS): mini-os-%:
 #######
 
 .PHONY: libxc
-libxc: libxc-$(XEN_TARGET_ARCH)/libxenctrl.a libxc-$(XEN_TARGET_ARCH)/libxenguest.a
-libxc-$(XEN_TARGET_ARCH)/libxenctrl.a: mk-headers-$(XEN_TARGET_ARCH) libxentoollog libxenevtchn libxengnttab libxencall libxenforeignmemory libxendevicemodel cross-zlib
+libxc: libxc-$(XEN_TARGET_ARCH)/libxenguest.a
+libxc-$(XEN_TARGET_ARCH)/libxenguest.a: libxenevtchn libxenctrl cross-zlib
+libxc-$(XEN_TARGET_ARCH)/libxenguest.a: mk-headers-$(XEN_TARGET_ARCH) $(NEWLIB_STAMPFILE)
        CPPFLAGS="$(TARGET_CPPFLAGS)" CFLAGS="$(TARGET_CFLAGS)" $(MAKE) DESTDIR= CONFIG_LIBXC_MINIOS=y -C libxc-$(XEN_TARGET_ARCH)
 
- libxc-$(XEN_TARGET_ARCH)/libxenguest.a: libxc-$(XEN_TARGET_ARCH)/libxenctrl.a
-
 #######
 # ioemu
 #######
index 26dff45a8fb95fcb762984b6dc0d1d2fb46862fb..d33fa2f71ea89f80523d3b49b7b5c209bc8993f6 100644 (file)
@@ -6,7 +6,9 @@ vpath %.c ../grub-upstream
 BOOT=$(OBJ_DIR)/boot-$(XEN_TARGET_ARCH).o
 
 DEF_CPPFLAGS += -I$(XEN_ROOT)/tools/libs/toollog/include
-DEF_CPPFLAGS += -I$(XEN_ROOT)/tools/libxc/include -I$(XEN_ROOT)/tools/include -I.
+DEF_CPPFLAGS += -I$(XEN_ROOT)/tools/libs/ctrl/include
+DEF_CPPFLAGS += -I$(XEN_ROOT)/tools/libxc/include
+DEF_CPPFLAGS += -I$(XEN_ROOT)/tools/include -I.
 DEF_CPPFLAGS += -I../grub-upstream/stage1
 DEF_CPPFLAGS += -I../grub-upstream/stage2
 DEF_CPPFLAGS += -I../grub-upstream/netboot
index 32528bb91f906fca0346020fff7266240e3e4a67..b1387df3f8e9abfe915fa2fc06562f1d87621650 100644 (file)
@@ -13,5 +13,5 @@ GNTTAB_PATH = $(XEN_ROOT)/stubdom/libs-$(MINIOS_TARGET_ARCH)/gnttab
 CALL_PATH = $(XEN_ROOT)/stubdom/libs-$(MINIOS_TARGET_ARCH)/call
 FOREIGNMEMORY_PATH = $(XEN_ROOT)/stubdom/libs-$(MINIOS_TARGET_ARCH)/foreignmemory
 DEVICEMODEL_PATH = $(XEN_ROOT)/stubdom/libs-$(MINIOS_TARGET_ARCH)/devicemodel
-CTRL_PATH = $(XEN_ROOT)/stubdom/libxc-$(MINIOS_TARGET_ARCH)
+CTRL_PATH = $(XEN_ROOT)/stubdom/libs-$(MINIOS_TARGET_ARCH)/ctrl
 GUEST_PATH = $(XEN_ROOT)/stubdom/libxc-$(MINIOS_TARGET_ARCH)
index 4f3aaaacd37765423b2293b7f304f05dd3f91e88..1cc56e9ab83a9e96e9ab7dd870fab7c1d267abb1 100644 (file)
@@ -15,7 +15,6 @@ XEN_INCLUDE        = $(XEN_ROOT)/tools/include
 
 include $(XEN_ROOT)/tools/libs/uselibs.mk
 
-XEN_libxenctrl     = $(XEN_ROOT)/tools/libxc
 XEN_libxenguest    = $(XEN_ROOT)/tools/libxc
 XEN_libxenlight    = $(XEN_ROOT)/tools/libxl
 # Currently libxlutil lives in the same directory as libxenlight
@@ -105,13 +104,10 @@ $(foreach lib,$(LIBS_LIBS),$(eval $(call LIB_defs,$(lib))))
 
 # code which compiles against libxenctrl get __XEN_TOOLS__ and
 # therefore sees the unstable hypercall interfaces.
-CFLAGS_libxenctrl = -I$(XEN_libxenctrl)/include $(CFLAGS_libxentoollog) $(CFLAGS_libxenforeignmemory) $(CFLAGS_libxendevicemodel) $(CFLAGS_xeninclude) -D__XEN_TOOLS__
-SHDEPS_libxenctrl = $(SHLIB_libxentoollog) $(SHLIB_libxenevtchn) $(SHLIB_libxengnttab) $(SHLIB_libxencall) $(SHLIB_libxenforeignmemory) $(SHLIB_libxendevicemodel)
-LDLIBS_libxenctrl = $(SHDEPS_libxenctrl) $(XEN_libxenctrl)/libxenctrl$(libextension)
-SHLIB_libxenctrl  = $(SHDEPS_libxenctrl) -Wl,-rpath-link=$(XEN_libxenctrl)
+CFLAGS_libxenctrl += $(CFLAGS_libxentoollog) $(CFLAGS_libxenforeignmemory) $(CFLAGS_libxendevicemodel) -D__XEN_TOOLS__
 
 CFLAGS_libxenguest = -I$(XEN_libxenguest)/include $(CFLAGS_libxenevtchn) $(CFLAGS_libxenforeignmemory) $(CFLAGS_xeninclude)
-SHDEPS_libxenguest = $(SHLIB_libxenevtchn)
+SHDEPS_libxenguest = $(SHLIB_libxenevtchn) $(SHLIB_libxenctrl)
 LDLIBS_libxenguest = $(SHDEPS_libxenguest) $(XEN_libxenguest)/libxenguest$(libextension)
 SHLIB_libxenguest  = $(SHDEPS_libxenguest) -Wl,-rpath-link=$(XEN_libxenguest)
 
index 69cdfb59757e4dec283e9c7f69b519390575b722..7648ea0e4cfc96f0831cc8078914e9a8f1ad4769 100644 (file)
@@ -9,6 +9,7 @@ SUBDIRS-y += gnttab
 SUBDIRS-y += call
 SUBDIRS-y += foreignmemory
 SUBDIRS-y += devicemodel
+SUBDIRS-y += ctrl
 SUBDIRS-y += hypfs
 
 ifeq ($(CONFIG_RUMP),y)
diff --git a/tools/libs/ctrl/Makefile b/tools/libs/ctrl/Makefile
new file mode 100644 (file)
index 0000000..ec93fb5
--- /dev/null
@@ -0,0 +1,72 @@
+XEN_ROOT = $(CURDIR)/../../..
+include $(XEN_ROOT)/tools/Rules.mk
+
+SRCS-y       += xc_altp2m.c
+SRCS-y       += xc_core.c
+SRCS-$(CONFIG_X86) += xc_core_x86.c
+SRCS-$(CONFIG_ARM) += xc_core_arm.c
+SRCS-y       += xc_cpupool.c
+SRCS-y       += xc_domain.c
+SRCS-y       += xc_evtchn.c
+SRCS-y       += xc_gnttab.c
+SRCS-y       += xc_misc.c
+SRCS-y       += xc_flask.c
+SRCS-y       += xc_physdev.c
+SRCS-y       += xc_private.c
+SRCS-y       += xc_csched.c
+SRCS-y       += xc_csched2.c
+SRCS-y       += xc_arinc653.c
+SRCS-y       += xc_rt.c
+SRCS-y       += xc_tbuf.c
+SRCS-y       += xc_pm.c
+SRCS-y       += xc_cpu_hotplug.c
+SRCS-y       += xc_resume.c
+SRCS-y       += xc_vm_event.c
+SRCS-y       += xc_monitor.c
+SRCS-y       += xc_mem_paging.c
+SRCS-y       += xc_mem_access.c
+SRCS-y       += xc_memshr.c
+SRCS-y       += xc_hcall_buf.c
+SRCS-y       += xc_foreign_memory.c
+SRCS-y       += xc_kexec.c
+SRCS-y       += xc_resource.c
+SRCS-$(CONFIG_X86) += xc_psr.c
+SRCS-$(CONFIG_X86) += xc_pagetab.c
+SRCS-$(CONFIG_Linux) += xc_linux.c
+SRCS-$(CONFIG_FreeBSD) += xc_freebsd.c
+SRCS-$(CONFIG_SunOS) += xc_solaris.c
+SRCS-$(CONFIG_NetBSD) += xc_netbsd.c
+SRCS-$(CONFIG_NetBSDRump) += xc_netbsd.c
+SRCS-$(CONFIG_MiniOS) += xc_minios.c
+SRCS-y       += xc_evtchn_compat.c
+SRCS-y       += xc_gnttab_compat.c
+SRCS-y       += xc_devicemodel_compat.c
+
+CFLAGS   += -D__XEN_TOOLS__
+CFLAGS += $(PTHREAD_CFLAGS)
+CFLAGS += -include $(XEN_ROOT)/tools/config.h
+
+# Needed for posix_fadvise64() in xc_linux.c
+CFLAGS-$(CONFIG_Linux) += -D_GNU_SOURCE
+
+LIBHEADER := xenctrl.h xenctrl_compat.h xenctrl_dom.h
+PKG_CONFIG := xencontrol.pc
+PKG_CONFIG_NAME := Xencontrol
+
+NO_HEADERS_CHK := y
+
+include $(XEN_ROOT)/tools/libs/libs.mk
+
+genpath-target = $(call buildmakevars2header,_paths.h)
+$(eval $(genpath-target))
+
+$(LIB_OBJS) $(PIC_OBJS): _paths.h
+
+$(PKG_CONFIG_LOCAL): PKG_CONFIG_INCDIR = $(XEN_libxenctrl)/include
+$(PKG_CONFIG_LOCAL): PKG_CONFIG_CFLAGS_LOCAL = $(CFLAGS_xeninclude)
+
+clean: cleanlocal
+
+.PHONY: cleanlocal
+cleanlocal:
+       rm -f libxenctrl.map
diff --git a/tools/libs/ctrl/include/xenctrl.h b/tools/libs/ctrl/include/xenctrl.h
new file mode 100644 (file)
index 0000000..4c89b72
--- /dev/null
@@ -0,0 +1,2668 @@
+/******************************************************************************
+ * xenctrl.h
+ *
+ * A library for low-level access to the Xen control interfaces.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XENCTRL_H
+#define XENCTRL_H
+
+/* Tell the Xen public headers we are a user-space tools build. */
+#ifndef __XEN_TOOLS__
+#define __XEN_TOOLS__ 1
+#endif
+
+#include <unistd.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <xen/xen.h>
+#include <xen/domctl.h>
+#include <xen/physdev.h>
+#include <xen/sysctl.h>
+#include <xen/version.h>
+#include <xen/event_channel.h>
+#include <xen/sched.h>
+#include <xen/memory.h>
+#include <xen/grant_table.h>
+#include <xen/hvm/dm_op.h>
+#include <xen/hvm/params.h>
+#include <xen/xsm/flask_op.h>
+#include <xen/kexec.h>
+#include <xen/platform.h>
+
+#include "xentoollog.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+#include <xen/foreign/x86_32.h>
+#include <xen/foreign/x86_64.h>
+#include <xen/arch-x86/xen-mca.h>
+#endif
+
+#define XC_PAGE_SHIFT           12
+#define XC_PAGE_SIZE            (1UL << XC_PAGE_SHIFT)
+#define XC_PAGE_MASK            (~(XC_PAGE_SIZE-1))
+
+#define INVALID_MFN  (~0UL)
+
+/*
+ *  DEFINITIONS FOR CPU BARRIERS
+ */
+
+#define xen_barrier() asm volatile ( "" : : : "memory")
+
+#if defined(__i386__)
+#define xen_mb()  asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
+#define xen_rmb() xen_barrier()
+#define xen_wmb() xen_barrier()
+#elif defined(__x86_64__)
+#define xen_mb()  asm volatile ( "mfence" : : : "memory")
+#define xen_rmb() xen_barrier()
+#define xen_wmb() xen_barrier()
+#elif defined(__arm__)
+#define xen_mb()   asm volatile ("dmb" : : : "memory")
+#define xen_rmb()  asm volatile ("dmb" : : : "memory")
+#define xen_wmb()  asm volatile ("dmb" : : : "memory")
+#elif defined(__aarch64__)
+#define xen_mb()   asm volatile ("dmb sy" : : : "memory")
+#define xen_rmb()  asm volatile ("dmb sy" : : : "memory")
+#define xen_wmb()  asm volatile ("dmb sy" : : : "memory")
+#else
+#error "Define barriers"
+#endif
+
+
+#define XENCTRL_HAS_XC_INTERFACE 1
+/* In Xen 4.0 and earlier, xc_interface_open and xc_evtchn_open would
+ * both return ints being the file descriptor.  In 4.1 and later, they
+ * return an xc_interface* and xc_evtchn*, respectively - ie, a
+ * pointer to an opaque struct.  This #define is provided in 4.1 and
+ * later, allowing out-of-tree callers to more easily distinguish
+ * between, and be compatible with, both versions.
+ */
+
+
+/*
+ *  GENERAL
+ *
+ * Unless otherwise specified, each function here returns zero or a
+ * non-null pointer on success; or in case of failure, sets errno and
+ * returns -1 or a null pointer.
+ *
+ * Unless otherwise specified, errors result in a call to the error
+ * handler function, which by default prints a message to the
+ * FILE* passed as the caller_data, which by default is stderr.
+ * (This is described below as "logging errors".)
+ *
+ * The error handler can safely trash errno, as libxc saves it across
+ * the callback.
+ */
+
+typedef struct xc_interface_core xc_interface;
+
+enum xc_error_code {
+  XC_ERROR_NONE = 0,
+  XC_INTERNAL_ERROR = 1,
+  XC_INVALID_KERNEL = 2,
+  XC_INVALID_PARAM = 3,
+  XC_OUT_OF_MEMORY = 4,
+  /* new codes need to be added to xc_error_level_to_desc too */
+};
+
+typedef enum xc_error_code xc_error_code;
+
+
+/*
+ *  INITIALIZATION FUNCTIONS
+ */
+
+/**
+ * This function opens a handle to the hypervisor interface.  This function can
+ * be called multiple times within a single process.  Multiple processes can
+ * have an open hypervisor interface at the same time.
+ *
+ * Note:
+ * After fork a child process must not use any opened xc interface
+ * handle inherited from their parent. They must open a new handle if
+ * they want to interact with xc.
+ *
+ * Each call to this function should have a corresponding call to
+ * xc_interface_close().
+ *
+ * This function can fail if the caller does not have superuser permission or
+ * if a Xen-enabled kernel is not currently running.
+ *
+ * @return a handle to the hypervisor interface
+ */
+xc_interface *xc_interface_open(xentoollog_logger *logger,
+                                xentoollog_logger *dombuild_logger,
+                                unsigned open_flags);
+  /* if logger==NULL, will log to stderr
+   * if dombuild_logger=NULL, will log to a file
+   */
+
+/*
+ * Note: if XC_OPENFLAG_NON_REENTRANT is passed then libxc must not be
+ * called reentrantly and the calling application is responsible for
+ * providing mutual exclusion surrounding all libxc calls itself.
+ *
+ * In particular xc_{get,clear}_last_error only remain valid for the
+ * duration of the critical section containing the call which failed.
+ */
+enum xc_open_flags {
+    XC_OPENFLAG_DUMMY =  1<<0, /* do not actually open a xenctrl interface */
+    XC_OPENFLAG_NON_REENTRANT = 1<<1, /* assume library is only ever called from a single thread */
+};
+
+/**
+ * This function closes an open hypervisor interface.
+ *
+ * This function can fail if the handle does not represent an open interface or
+ * if there were problems closing the interface.  In the latter case
+ * the interface is still closed.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @return 0 on success, -1 otherwise.
+ */
+int xc_interface_close(xc_interface *xch);
+
+/**
+ * Return the handles which xch has opened and will use for
+ * hypercalls, foreign memory accesses and device model operations.
+ * These may be used with the corresponding libraries so long as the
+ * xch itself remains open.
+ */
+struct xencall_handle *xc_interface_xcall_handle(xc_interface *xch);
+struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch);
+struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch);
+
+/*
+ * HYPERCALL SAFE MEMORY BUFFER
+ *
+ * Ensure that memory which is passed to a hypercall has been
+ * specially allocated in order to be safe to access from the
+ * hypervisor.
+ *
+ * Each user data pointer is shadowed by an xc_hypercall_buffer data
+ * structure. You should never define an xc_hypercall_buffer type
+ * directly, instead use the DECLARE_HYPERCALL_BUFFER* macros below.
+ *
+ * The structure should be considered opaque and all access should be
+ * via the macros and helper functions defined below.
+ *
+ * Once the buffer is declared the user is responsible for explicitly
+ * allocating and releasing the memory using
+ * xc_hypercall_buffer_alloc(_pages) and
+ * xc_hypercall_buffer_free(_pages).
+ *
+ * Once the buffer has been allocated the user can initialise the data
+ * via the normal pointer. The xc_hypercall_buffer structure is
+ * transparently referenced by the helper macros (such as
+ * xen_set_guest_handle) in order to check at compile time that the
+ * correct type of memory is being used.
+ */
+struct xc_hypercall_buffer {
+    /* Hypercall safe memory buffer. */
+    void *hbuf;
+
+    /*
+     * Reference to xc_hypercall_buffer passed as argument to the
+     * current function.
+     */
+    struct xc_hypercall_buffer *param_shadow;
+
+    /*
+     * Direction of copy for bounce buffering.
+     */
+    int dir;
+
+    /* Used iff dir != 0. */
+    void *ubuf;
+    size_t sz;
+};
+typedef struct xc_hypercall_buffer xc_hypercall_buffer_t;
+
+/*
+ * Construct the name of the hypercall buffer for a given variable.
+ * For internal use only
+ */
+#define XC__HYPERCALL_BUFFER_NAME(_name) xc__hypercall_buffer_##_name
+
+/*
+ * Returns the hypercall_buffer associated with a variable.
+ */
+#define HYPERCALL_BUFFER(_name)                                 \
+    ({  xc_hypercall_buffer_t _hcbuf_buf1;                      \
+        typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_buf2 = \
+                &XC__HYPERCALL_BUFFER_NAME(_name);              \
+        (void)(&_hcbuf_buf1 == _hcbuf_buf2);                    \
+        (_hcbuf_buf2)->param_shadow ?                           \
+                (_hcbuf_buf2)->param_shadow : (_hcbuf_buf2);    \
+     })
+
+#define HYPERCALL_BUFFER_INIT_NO_BOUNCE .dir = 0, .sz = 0, .ubuf = (void *)-1
+
+/*
+ * Defines a hypercall buffer and user pointer with _name of _type.
+ *
+ * The user accesses the data as normal via _name which will be
+ * transparently converted to the hypercall buffer as necessary.
+ */
+#define DECLARE_HYPERCALL_BUFFER(_type, _name)                 \
+    _type *(_name) = NULL;                                     \
+    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
+        .hbuf = NULL,                                          \
+        .param_shadow = NULL,                                  \
+        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
+    }
+
+/*
+ * Like DECLARE_HYPERCALL_BUFFER() but using an already allocated
+ * hypercall buffer, _hbuf.
+ *
+ * Useful when a hypercall buffer is passed to a function and access
+ * via the user pointer is required.
+ *
+ * See DECLARE_HYPERCALL_BUFFER_ARGUMENT() if the user pointer is not
+ * required.
+ */
+#define DECLARE_HYPERCALL_BUFFER_SHADOW(_type, _name, _hbuf)   \
+    _type *(_name) = (_hbuf)->hbuf;                            \
+    __attribute__((unused))                                    \
+    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
+        .hbuf = (void *)-1,                                    \
+        .param_shadow = (_hbuf),                               \
+        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
+    }
+
+/*
+ * Declare the necessary data structure to allow a hypercall buffer
+ * passed as an argument to a function to be used in the normal way.
+ */
+#define DECLARE_HYPERCALL_BUFFER_ARGUMENT(_name)               \
+    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
+        .hbuf = (void *)-1,                                    \
+        .param_shadow = (_name),                               \
+        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
+    }
+
+/*
+ * Get the hypercall buffer data pointer in a form suitable for use
+ * directly as a hypercall argument.
+ */
+#define HYPERCALL_BUFFER_AS_ARG(_name)                          \
+    ({  xc_hypercall_buffer_t _hcbuf_arg1;                      \
+        typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_arg2 = \
+                HYPERCALL_BUFFER(_name);                        \
+        (void)(&_hcbuf_arg1 == _hcbuf_arg2);                    \
+        (unsigned long)(_hcbuf_arg2)->hbuf;                     \
+     })
+
+/*
+ * Set a xen_guest_handle in a type safe manner, ensuring that the
+ * data pointer has been correctly allocated.
+ */
+#define set_xen_guest_handle_impl(_hnd, _val, _byte_off)        \
+    do {                                                        \
+        xc_hypercall_buffer_t _hcbuf_hnd1;                      \
+        typeof(XC__HYPERCALL_BUFFER_NAME(_val)) *_hcbuf_hnd2 =  \
+                HYPERCALL_BUFFER(_val);                         \
+        (void) (&_hcbuf_hnd1 == _hcbuf_hnd2);                   \
+        set_xen_guest_handle_raw(_hnd,                          \
+                (_hcbuf_hnd2)->hbuf + (_byte_off));             \
+    } while (0)
+
+#undef set_xen_guest_handle
+#define set_xen_guest_handle(_hnd, _val)                        \
+    set_xen_guest_handle_impl(_hnd, _val, 0)
+
+#define set_xen_guest_handle_offset(_hnd, _val, _off)           \
+    set_xen_guest_handle_impl(_hnd, _val,                       \
+            ((sizeof(*_val)*(_off))))
+
+/* Use with set_xen_guest_handle in place of NULL */
+extern xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL);
+
+/*
+ * Allocate and free hypercall buffers with byte granularity.
+ */
+void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size);
+#define xc_hypercall_buffer_alloc(_xch, _name, _size) xc__hypercall_buffer_alloc(_xch, HYPERCALL_BUFFER(_name), _size)
+void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b);
+#define xc_hypercall_buffer_free(_xch, _name) xc__hypercall_buffer_free(_xch, HYPERCALL_BUFFER(_name))
+
+/*
+ * Allocate and free hypercall buffers with page alignment.
+ */
+void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
+#define xc_hypercall_buffer_alloc_pages(_xch, _name, _nr) xc__hypercall_buffer_alloc_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
+void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
+#define xc_hypercall_buffer_free_pages(_xch, _name, _nr)                    \
+    do {                                                                    \
+        if ( _name )                                                        \
+            xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name),  \
+                                            _nr);                           \
+    } while (0)
+
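
Illustrative sketch, not part of the diff: the hypercall-buffer macros above are typically paired as below; the hypercall operation the buffer would be handed to is left hypothetical.

/* Assumes <string.h> for memset(); "op.buffer" below names a hypothetical
 * guest-handle field, shown only to indicate where set_xen_guest_handle()
 * would be used. */
static int example_use_hypercall_buffer(xc_interface *xch, size_t size)
{
    DECLARE_HYPERCALL_BUFFER(uint8_t, buf);
    int rc = -1;

    buf = xc_hypercall_buffer_alloc(xch, buf, size);
    if ( buf == NULL )
        return -1;

    memset(buf, 0, size);                  /* fill via the normal pointer */
    /* set_xen_guest_handle(op.buffer, buf); ... issue the hypercall ... */
    rc = 0;

    xc_hypercall_buffer_free(xch, buf);
    return rc;
}
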
+/*
+ * Array of hypercall buffers.
+ *
+ * Create an array with xc_hypercall_buffer_array_create() and
+ * populate it by declaring one hypercall buffer in a loop and
+ * allocating the buffer with xc_hypercall_buffer_array_alloc().
+ *
+ * To access a previously allocated buffer, declare a new hypercall
+ * buffer and call xc_hypercall_buffer_array_get().
+ *
+ * Destroy the array with xc_hypercall_buffer_array_destroy() to free
+ * the array and all its allocated hypercall buffers.
+ */
+struct xc_hypercall_buffer_array;
+typedef struct xc_hypercall_buffer_array xc_hypercall_buffer_array_t;
+
+xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch, unsigned n);
+void *xc__hypercall_buffer_array_alloc(xc_interface *xch, xc_hypercall_buffer_array_t *array,
+                                       unsigned index, xc_hypercall_buffer_t *hbuf, size_t size);
+#define xc_hypercall_buffer_array_alloc(_xch, _array, _index, _name, _size) \
+    xc__hypercall_buffer_array_alloc(_xch, _array, _index, HYPERCALL_BUFFER(_name), _size)
+void *xc__hypercall_buffer_array_get(xc_interface *xch, xc_hypercall_buffer_array_t *array,
+                                     unsigned index, xc_hypercall_buffer_t *hbuf);
+#define xc_hypercall_buffer_array_get(_xch, _array, _index, _name, _size) \
+    xc__hypercall_buffer_array_get(_xch, _array, _index, HYPERCALL_BUFFER(_name))
+void xc_hypercall_buffer_array_destroy(xc_interface *xc, xc_hypercall_buffer_array_t *array);
+
+/*
+ * CPUMAP handling
+ */
+typedef uint8_t *xc_cpumap_t;
+
+/* return maximum number of cpus the hypervisor supports */
+int xc_get_max_cpus(xc_interface *xch);
+
+/* return the number of online cpus */
+int xc_get_online_cpus(xc_interface *xch);
+
+/* return array size for cpumap */
+int xc_get_cpumap_size(xc_interface *xch);
+
+/* allocate a cpumap */
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
+
+/* clear a CPU from the cpumap. */
+void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map);
+
+/* set a CPU in the cpumap. */
+void xc_cpumap_setcpu(int cpu, xc_cpumap_t map);
+
+/* Test whether a CPU in the cpumap is set. */
+int xc_cpumap_testcpu(int cpu, xc_cpumap_t map);
+
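
Illustrative sketch, not part of the diff: typical use of the cpumap helpers, assuming the map returned by xc_cpumap_alloc() is a plain heap allocation that can be released with free() (from <stdlib.h>).

static int example_cpumap(xc_interface *xch)
{
    xc_cpumap_t map = xc_cpumap_alloc(xch);
    int rc;

    if ( !map )
        return -1;

    xc_cpumap_setcpu(0, map);                   /* mark CPU 0 */
    rc = xc_cpumap_testcpu(0, map) ? 0 : -1;    /* expect it to be set */
    xc_cpumap_clearcpu(0, map);

    free(map);
    return rc;
}
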
+/*
+ * NODEMAP handling
+ */
+typedef uint8_t *xc_nodemap_t;
+
+/* return maximum number of NUMA nodes the hypervisor supports */
+int xc_get_max_nodes(xc_interface *xch);
+
+/* return array size for nodemap */
+int xc_get_nodemap_size(xc_interface *xch);
+
+/* allocate a nodemap */
+xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);
+
+/*
+ * DOMAIN DEBUGGING FUNCTIONS
+ */
+
+typedef struct xc_core_header {
+    unsigned int xch_magic;
+    unsigned int xch_nr_vcpus;
+    unsigned int xch_nr_pages;
+    unsigned int xch_ctxt_offset;
+    unsigned int xch_index_offset;
+    unsigned int xch_pages_offset;
+} xc_core_header_t;
+
+#define XC_CORE_MAGIC     0xF00FEBED
+#define XC_CORE_MAGIC_HVM 0xF00FEBEE
+
+/*
+ * DOMAIN MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_dominfo {
+    uint32_t      domid;
+    uint32_t      ssidref;
+    unsigned int  dying:1, crashed:1, shutdown:1,
+                  paused:1, blocked:1, running:1,
+                  hvm:1, debugged:1, xenstore:1, hap:1;
+    unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
+    unsigned long nr_pages; /* current number, not maximum */
+    unsigned long nr_outstanding_pages;
+    unsigned long nr_shared_pages;
+    unsigned long nr_paged_pages;
+    unsigned long shared_info_frame;
+    uint64_t      cpu_time;
+    unsigned long max_memkb;
+    unsigned int  nr_online_vcpus;
+    unsigned int  max_vcpu_id;
+    xen_domain_handle_t handle;
+    unsigned int  cpupool;
+    struct xen_arch_domainconfig arch_config;
+} xc_dominfo_t;
+
+typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
+
+typedef union 
+{
+#if defined(__i386__) || defined(__x86_64__)
+    vcpu_guest_context_x86_64_t x64;
+    vcpu_guest_context_x86_32_t x32;   
+#endif
+    vcpu_guest_context_t c;
+} vcpu_guest_context_any_t;
+
+typedef union
+{
+#if defined(__i386__) || defined(__x86_64__)
+    shared_info_x86_64_t x64;
+    shared_info_x86_32_t x32;
+#endif
+    shared_info_t s;
+} shared_info_any_t;
+
+#if defined(__i386__) || defined(__x86_64__)
+typedef union
+{
+    start_info_x86_64_t x64;
+    start_info_x86_32_t x32;
+    start_info_t s;
+} start_info_any_t;
+#endif
+
+typedef struct xc_vcpu_extstate {
+    uint64_t xfeature_mask;
+    uint64_t size;
+    void *buffer;
+} xc_vcpu_extstate_t;
+
+int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
+                     struct xen_domctl_createdomain *config);
+
+
+/* Functions to produce a dump of a given domain
+ *  xc_domain_dumpcore - produces a dump to a specified file
+ *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
+ *                                    callback function
+ */
+int xc_domain_dumpcore(xc_interface *xch,
+                       uint32_t domid,
+                       const char *corename);
+
+/* Define the callback function type for xc_domain_dumpcore_via_callback.
+ *
+ * This function is called by the coredump code for every "write",
+ * and passes an opaque object for the use of the function and
+ * created by the caller of xc_domain_dumpcore_via_callback.
+ */
+typedef int (dumpcore_rtn_t)(xc_interface *xch,
+                             void *arg, char *buffer, unsigned int length);
+
+int xc_domain_dumpcore_via_callback(xc_interface *xch,
+                                    uint32_t domid,
+                                    void *arg,
+                                    dumpcore_rtn_t dump_rtn);
+
+/*
+ * This function sets the maximum number of vcpus that a domain may create.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id in which vcpus are to be created.
+ * @parm max the maximum number of vcpus that the domain may create.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_max_vcpus(xc_interface *xch,
+                        uint32_t domid,
+                        unsigned int max);
+
+/**
+ * This function pauses a domain. A paused domain still exists in memory
+ * however it does not receive any timeslices from the hypervisor.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to pause
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_pause(xc_interface *xch,
+                    uint32_t domid);
+/**
+ * This function unpauses a domain.  The domain should have been previously
+ * paused.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to unpause
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_unpause(xc_interface *xch,
+                      uint32_t domid);
+
+/**
+ * This function will destroy a domain.  Destroying a domain removes the domain
+ * completely from memory.  This function should be called after sending the
+ * domain a SHUTDOWN control message to free up the domain resources.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to destroy
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_destroy(xc_interface *xch,
+                      uint32_t domid);
+
+
+/**
+ * This function resumes a suspended domain. The domain should have
+ * been previously suspended.
+ *
+ * Note that there is no 'xc_domain_suspend' as suspending a domain
+ * is quite the endeavour.
+ *
+ * For the purpose of this explanation there are three guests:
+ * PV (using hypercalls for privileged operations), HVM
+ * (fully hardware virtualized guests using emulated devices for everything),
+ * and PVHVM (PV aware with hardware virtualisation).
+ *
+ * HVM guests are the simplest - they suspend via S3 / S4 and resume from
+ * S3 / S4. Upon resume they have to re-negotiate with the emulated devices.
+ *
+ * PV and PVHVM communicate via hypercalls for suspend (and resume).
+ * For suspend the toolstack initiates the process by writing a value
+ * in XenBus "control/shutdown" with the string "suspend".
+ *
+ * The PV guest stashes anything it deems necessary in 'struct
+ * start_info' in case of failure (PVHVM may ignore this) and calls
+ * the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall (for PV as
+ * argument it passes the MFN to 'struct start_info').
+ *
+ * And then the guest is suspended.
+ *
+ * Checkpointing, or notifying a guest that the suspend failed or was
+ * cancelled (in case of checkpoint), is done by having the
+ * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return a non-zero
+ * value.
+ *
+ * The PV and PVHVM resume paths are similar. For PV it would be
+ * similar to bootup - figure out where the 'struct start_info' is (or
+ * if the suspend was cancelled aka checkpointed - reuse the saved
+ * values).
+ *
+ * From here on they differ depending on whether the guest is PV or PVHVM
+ * in specifics but follow overall the same path:
+ *  - PV: Bringing up the vCPUS,
+ *  - PVHVM: Setup vector callback,
+ *  - Bring up vCPU runstates,
+ *  - Remap the grant tables if checkpointing or setup from scratch,
+ *
+ *
+ * If the resume was not checkpointing (or if suspend was successful) we would
+ * setup the PV timers and the different PV events. Lastly the PV drivers
+ * re-negotiate with the backend.
+ *
+ * This function would return before the guest started resuming. That is
+ * the guest would be in non-running state and its vCPU context would be
+ * in the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return path
+ * (for PV and PVHVM). For HVM it would be in the QEMU emulated
+ * BIOS handling S3 suspend.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to resume
+ * @parm fast use cooperative resume (guest must support this)
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_resume(xc_interface *xch,
+                    uint32_t domid,
+                    int fast);
+
+/**
+ * This function will shutdown a domain. This is intended for use in
+ * fully-virtualized domains where this operation is analogous to the
+ * sched_op operations in a paravirtualized domain. The caller is
+ * expected to give the reason for the shutdown.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to destroy
+ * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_shutdown(xc_interface *xch,
+                       uint32_t domid,
+                       int reason);
+
+int xc_watchdog(xc_interface *xch,
+               uint32_t id,
+               uint32_t timeout);
+
+/**
+ * This function explicitly sets the host NUMA nodes the domain will
+ * have affinity with.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id one wants to set the affinity of.
+ * @parm nodemap the map of the affine nodes.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_node_setaffinity(xc_interface *xch,
+                               uint32_t domind,
+                               xc_nodemap_t nodemap);
+
+/**
+ * This function retrieves the host NUMA nodes the domain has
+ * affinity with.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id one wants to get the node affinity of.
+ * @parm nodemap the map of the affine nodes.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_node_getaffinity(xc_interface *xch,
+                               uint32_t domind,
+                               xc_nodemap_t nodemap);
+
+/**
+ * This function specifies the CPU affinity for a vcpu.
+ *
+ * There are two kinds of affinity. Soft affinity is on what CPUs a vcpu
+ * prefers to run. Hard affinity is on what CPUs a vcpu is allowed to run.
+ * If flags contains XEN_VCPUAFFINITY_SOFT, the soft affinity is set to
+ * what cpumap_soft_inout contains. If flags contains XEN_VCPUAFFINITY_HARD,
+ * the hard affinity is set to what cpumap_hard_inout contains. Both flags
+ * can be set at the same time, in which case both soft and hard affinity are
+ * set to what the respective parameter contains.
+ *
+ * The function also returns the effective hard or/and soft affinity, still
+ * via the cpumap_soft_inout and cpumap_hard_inout parameters. Effective
+ * affinity is, in case of soft affinity, the intersection of soft affinity,
+ * hard affinity and the cpupool's online CPUs for the domain, and is returned
+ * in cpumap_soft_inout, if XEN_VCPUAFFINITY_SOFT is set in flags. In case of
+ * hard affinity, it is the intersection between hard affinity and the
+ * cpupool's online CPUs, and is returned in cpumap_hard_inout, if
+ * XEN_VCPUAFFINITY_HARD is set in flags. If both flags are set, both soft
+ * and hard affinity are returned in the respective parameter.
+ *
+ * We do report it back as effective affinity is what the Xen scheduler will
+ * actually use, and we thus allow checking whether or not that matches with,
+ * or at least is good enough for, the caller's purposes.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap_hard_inout specifies(/returns) the (effective) hard affinity
+ * @param cpumap_soft_inout specifies(/returns) the (effective) soft affinity
+ * @param flags what we want to set
+ */
+int xc_vcpu_setaffinity(xc_interface *xch,
+                        uint32_t domid,
+                        int vcpu,
+                        xc_cpumap_t cpumap_hard_inout,
+                        xc_cpumap_t cpumap_soft_inout,
+                        uint32_t flags);
+
+/**
+ * This function retrieves hard and soft CPU affinity of a vcpu,
+ * depending on what flags are set.
+ *
+ * Soft affinity is returned in cpumap_soft if XEN_VCPUAFFINITY_SOFT is set.
+ * Hard affinity is returned in cpumap_hard if XEN_VCPUAFFINITY_HARD is set.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap_hard is where hard affinity is returned
+ * @param cpumap_soft is where soft affinity is returned
+ * @param flags what we want get
+ */
+int xc_vcpu_getaffinity(xc_interface *xch,
+                        uint32_t domid,
+                        int vcpu,
+                        xc_cpumap_t cpumap_hard,
+                        xc_cpumap_t cpumap_soft,
+                        uint32_t flags);
+
+
+/**
+ * This function will return the guest_width (in bytes) for the
+ * specified domain.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the domain id one wants the address size width of.
+ * @param addr_size the address size.
+ */
+int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
+                              unsigned int *guest_width);
+
+
+/**
+ * This function will return information about one or more domains. It is
+ * designed to iterate over the list of domains. If a single domain is
+ * requested, this function will return the next domain in the list - if
+ * one exists. It is, therefore, important in this case to make sure the
+ * domain requested was the one returned.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm first_domid the first domain to enumerate information from.  Domains
+ *                   are currently enumerated in order of creation.
+ * @parm max_doms the number of elements in info
+ * @parm info an array of max_doms size that will contain the information for
+ *            the enumerated domains.
+ * @return the number of domains enumerated or -1 on error
+ */
+int xc_domain_getinfo(xc_interface *xch,
+                      uint32_t first_domid,
+                      unsigned int max_doms,
+                      xc_dominfo_t *info);
+
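
Illustrative sketch, not part of the diff: since each successful call returns the next existing domain with an id at or above first_domid, enumerating all domains is a simple loop.

static void example_list_domains(xc_interface *xch)
{
    xc_dominfo_t info;
    uint32_t next = 0;

    while ( xc_domain_getinfo(xch, next, 1, &info) == 1 )
    {
        printf("domain %u: %lu pages, %u online vcpus\n",
               info.domid, info.nr_pages, info.nr_online_vcpus);
        next = info.domid + 1;      /* continue after the domain just seen */
    }
}
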
+
+/**
+ * This function will set the execution context for the specified vcpu.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to set the vcpu context for
+ * @parm vcpu the vcpu number for the context
+ * @parm ctxt pointer to the cpu context with the values to set
+ * @return 0 on success, -1 on error
+ */
+int xc_vcpu_setcontext(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t vcpu,
+                       vcpu_guest_context_any_t *ctxt);
+/**
+ * This function will return information about one or more domains, using a
+ * single hypercall.  The domain information will be stored into the supplied
+ * array of xc_domaininfo_t structures.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm first_domain the first domain to enumerate information from.
+ *                    Domains are currently enumerated in order of creation.
+ * @parm max_domains the number of elements in info
+ * @parm info an array of max_doms size that will contain the information for
+ *            the enumerated domains.
+ * @return the number of domains enumerated or -1 on error
+ */
+int xc_domain_getinfolist(xc_interface *xch,
+                          uint32_t first_domain,
+                          unsigned int max_domains,
+                          xc_domaininfo_t *info);
+
+/**
+ * This function sets the p2m entry for a broken page
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id which the broken page belongs to
+ * @parm pfn the pfn number of the broken page
+ * @return 0 on success, -1 on failure
+ */
+int xc_set_broken_page_p2m(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned long pfn);
+
+/**
+ * This function returns information about the context of a hvm domain
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm ctxt_buf a pointer to a structure to store the execution context of
+ *            the hvm domain
+ * @parm size the size of ctxt_buf in bytes
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_getcontext(xc_interface *xch,
+                             uint32_t domid,
+                             uint8_t *ctxt_buf,
+                             uint32_t size);
+
+
+/**
+ * This function returns one element of the context of a hvm domain
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm typecode which type of element required
+ * @parm instance which instance of the type
+ * @parm ctxt_buf a pointer to a structure to store the execution context of
+ *            the hvm domain
+ * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_getcontext_partial(xc_interface *xch,
+                                     uint32_t domid,
+                                     uint16_t typecode,
+                                     uint16_t instance,
+                                     void *ctxt_buf,
+                                     uint32_t size);
+
+/**
+ * This function will set the context for hvm domain
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to set the hvm domain context for
+ * @parm hvm_ctxt pointer to the hvm context with the values to set
+ * @parm size the size of hvm_ctxt in bytes
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_setcontext(xc_interface *xch,
+                             uint32_t domid,
+                             uint8_t *hvm_ctxt,
+                             uint32_t size);
+
+/**
+ * This function will return guest IO ABI protocol
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get IO ABI protocol for
+ * @return guest protocol on success, NULL on failure
+ */
+const char *xc_domain_get_native_protocol(xc_interface *xch,
+                                          uint32_t domid);
+
+/**
+ * This function returns information about the execution context of a
+ * particular vcpu of a domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm vcpu the vcpu number
+ * @parm ctxt a pointer to a structure to store the execution context of the
+ *            domain
+ * @return 0 on success, -1 on failure
+ */
+int xc_vcpu_getcontext(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t vcpu,
+                       vcpu_guest_context_any_t *ctxt);
+
+/**
+ * This function initializes the vuart emulation and returns
+ * the event to be used by the backend for communicating with
+ * the emulation code.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm type type of vuart
+ * @parm domid the domain to get information from
+ * @parm console_domid the domid of the backend console
+ * @parm gfn the guest pfn to be used as the ring buffer
+ * @parm evtchn the event channel to be used for events
+ * @return 0 on success, negative error on failure
+ */
+int xc_dom_vuart_init(xc_interface *xch,
+                      uint32_t type,
+                      uint32_t domid,
+                      uint32_t console_domid,
+                      xen_pfn_t gfn,
+                      evtchn_port_t *evtchn);
+
+/**
+ * This function returns information about the XSAVE state of a particular
+ * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
+ * the call is considered a query to retrieve them and the buffer is not
+ * filled.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm vcpu the vcpu number
+ * @parm extstate a pointer to a structure to store the XSAVE state of the
+ *                domain
+ * @return 0 on success, negative error code on failure
+ */
+int xc_vcpu_get_extstate(xc_interface *xch,
+                         uint32_t domid,
+                         uint32_t vcpu,
+                         xc_vcpu_extstate_t *extstate);
+
+typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
+int xc_vcpu_getinfo(xc_interface *xch,
+                    uint32_t domid,
+                    uint32_t vcpu,
+                    xc_vcpuinfo_t *info);
+
+long long xc_domain_get_cpu_usage(xc_interface *xch,
+                                  uint32_t domid,
+                                  int vcpu);
+
+int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
+                        xen_domain_handle_t handle);
+
+typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
+int xc_shadow_control(xc_interface *xch,
+                      uint32_t domid,
+                      unsigned int sop,
+                      xc_hypercall_buffer_t *dirty_bitmap,
+                      unsigned long pages,
+                      unsigned long *mb,
+                      uint32_t mode,
+                      xc_shadow_op_stats_t *stats);
+
+int xc_sched_credit_domain_set(xc_interface *xch,
+                               uint32_t domid,
+                               struct xen_domctl_sched_credit *sdom);
+
+int xc_sched_credit_domain_get(xc_interface *xch,
+                               uint32_t domid,
+                               struct xen_domctl_sched_credit *sdom);
+int xc_sched_credit_params_set(xc_interface *xch,
+                               uint32_t cpupool_id,
+                               struct xen_sysctl_credit_schedule *schedule);
+int xc_sched_credit_params_get(xc_interface *xch,
+                               uint32_t cpupool_id,
+                               struct xen_sysctl_credit_schedule *schedule);
+
+int xc_sched_credit2_params_set(xc_interface *xch,
+                                uint32_t cpupool_id,
+                                struct xen_sysctl_credit2_schedule *schedule);
+int xc_sched_credit2_params_get(xc_interface *xch,
+                                uint32_t cpupool_id,
+                                struct xen_sysctl_credit2_schedule *schedule);
+int xc_sched_credit2_domain_set(xc_interface *xch,
+                                uint32_t domid,
+                                struct xen_domctl_sched_credit2 *sdom);
+int xc_sched_credit2_domain_get(xc_interface *xch,
+                                uint32_t domid,
+                                struct xen_domctl_sched_credit2 *sdom);
+
+int xc_sched_rtds_domain_set(xc_interface *xch,
+                             uint32_t domid,
+                             struct xen_domctl_sched_rtds *sdom);
+int xc_sched_rtds_domain_get(xc_interface *xch,
+                             uint32_t domid,
+                             struct xen_domctl_sched_rtds *sdom);
+int xc_sched_rtds_vcpu_set(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus);
+int xc_sched_rtds_vcpu_get(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus);
+
+int
+xc_sched_arinc653_schedule_set(
+    xc_interface *xch,
+    uint32_t cpupool_id,
+    struct xen_sysctl_arinc653_schedule *schedule);
+
+int
+xc_sched_arinc653_schedule_get(
+    xc_interface *xch,
+    uint32_t cpupool_id,
+    struct xen_sysctl_arinc653_schedule *schedule);
+
+/**
+ * This function sends a trigger to a domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to send trigger
+ * @parm trigger the trigger type
+ * @parm vcpu the vcpu number to send trigger 
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_send_trigger(xc_interface *xch,
+                           uint32_t domid,
+                           uint32_t trigger,
+                           uint32_t vcpu);
+
+/**
+ * This function enables or disables debugging of a domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to send trigger
+ * @parm enable true to enable debugging
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_setdebugging(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned int enable);
+
+/**
+ * This function audits the (top level) p2m of a domain 
+ * and returns the different error counts, if any.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id whose top level p2m we 
+ *       want to audit
+ * @parm orphans count of m2p entries for valid
+ *       domain pages containing an invalid value
+ * @parm m2p_bad count of m2p entries mismatching the
+ *       associated p2m entry for this domain
+ * @parm p2m_bad count of p2m entries for this domain
+ *       mismatching the associated m2p entry
+ * return 0 on success, -1 on failure
+ * errno values on failure include: 
+ *          -ENOSYS: not implemented
+ *          -EFAULT: could not copy results back to guest
+ */
+int xc_domain_p2m_audit(xc_interface *xch,
+                        uint32_t domid,
+                        uint64_t *orphans,
+                        uint64_t *m2p_bad,   
+                        uint64_t *p2m_bad);
+
+/**
+ * This function sets or clears the requirement that an access memory
+ * event listener is required on the domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id to send trigger
+ * @parm enable true to require a listener
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_set_access_required(xc_interface *xch,
+                                 uint32_t domid,
+                                 unsigned int required);
+/**
+ * This function sets the handler of global VIRQs sent by the hypervisor
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id which will handle the VIRQ
+ * @parm virq the virq number (VIRQ_*)
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
+
+/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;
+    uint32_t sched_id;
+    uint32_t n_dom;
+    xc_cpumap_t cpumap;
+} xc_cpupoolinfo_t;
+
+#define XC_CPUPOOL_POOLID_ANY 0xFFFFFFFF
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(xc_interface *xch,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(xc_interface *xch,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (to be freed via xc_cpupool_infofree)
+ */
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid);
+
+/**
+ * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm info area to free
+ */
+void xc_cpupool_infofree(xc_interface *xch,
+                         xc_cpupoolinfo_t *info);
+
+/**
+ * Add a cpu to a cpupool. cpu may be -1, indicating the first unassigned cpu.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(xc_interface *xch,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove a cpu from a cpupool. cpu may be -1, indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(xc_interface *xch,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(xc_interface *xch,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * return cpumap array on success, NULL else
+ */
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
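+
+/*
+ * Illustrative usage sketch of the cpupool lifecycle; error handling is
+ * elided and passing XC_CPUPOOL_POOLID_ANY to let the hypervisor pick an
+ * id is an assumption for this example.
+ *
+ *   int sched_id;
+ *   uint32_t poolid = XC_CPUPOOL_POOLID_ANY;
+ *
+ *   xc_sched_id(xch, &sched_id);
+ *   xc_cpupool_create(xch, &poolid, sched_id);
+ *   xc_cpupool_addcpu(xch, poolid, -1);     (first unassigned cpu)
+ *   xc_cpupool_removecpu(xch, poolid, -1);  (last cpu of the pool)
+ *   xc_cpupool_destroy(xch, poolid);
+ */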
+
+/*
+ * EVENT CHANNEL FUNCTIONS
+ *
+ * None of these do any logging.
+ */
+
+/* A port identifier is guaranteed to fit in 31 bits. */
+typedef int xc_evtchn_port_or_error_t;
+
+/**
+ * This function allocates an unbound port.  Ports are named endpoints used for
+ * interdomain communication.  This function is most useful in opening a
+ * well-known port within a domain to receive events on.
+ * 
+ * NOTE: If you are allocating a *local* unbound port, you probably want to
+ * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
+ * ports *only* during domain creation.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm dom the ID of the local domain (the 'allocatee')
+ * @parm remote_dom the ID of the domain who will later bind
+ * @return allocated port (in @dom) on success, -1 on failure
+ */
+xc_evtchn_port_or_error_t
+xc_evtchn_alloc_unbound(xc_interface *xch,
+                        uint32_t dom,
+                        uint32_t remote_dom);
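+
+/*
+ * Illustrative usage sketch: during creation of domain 'domid', allocate
+ * a port in the new domain that dom0 (remote_dom 0) will later bind to.
+ *
+ *   xc_evtchn_port_or_error_t port =
+ *       xc_evtchn_alloc_unbound(xch, domid, 0);
+ *
+ *   if ( port < 0 )
+ *       perror("xc_evtchn_alloc_unbound");
+ */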
+
+int xc_evtchn_reset(xc_interface *xch,
+                    uint32_t dom);
+
+typedef struct evtchn_status xc_evtchn_status_t;
+int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status);
+
+
+
+int xc_physdev_pci_access_modify(xc_interface *xch,
+                                 uint32_t domid,
+                                 int bus,
+                                 int dev,
+                                 int func,
+                                 int enable);
+
+int xc_readconsolering(xc_interface *xch,
+                       char *buffer,
+                       unsigned int *pnr_chars,
+                       int clear, int incremental, uint32_t *pindex);
+
+int xc_send_debug_keys(xc_interface *xch, const char *keys);
+
+typedef struct xen_sysctl_physinfo xc_physinfo_t;
+typedef struct xen_sysctl_cputopo xc_cputopo_t;
+typedef struct xen_sysctl_numainfo xc_numainfo_t;
+typedef struct xen_sysctl_meminfo xc_meminfo_t;
+typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
+
+typedef uint32_t xc_cpu_to_node_t;
+typedef uint32_t xc_cpu_to_socket_t;
+typedef uint32_t xc_cpu_to_core_t;
+typedef uint64_t xc_node_to_memsize_t;
+typedef uint64_t xc_node_to_memfree_t;
+typedef uint32_t xc_node_to_node_dist_t;
+
+int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
+int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
+                   xc_cputopo_t *cputopo);
+int xc_microcode_update(xc_interface *xch, const void *buf, size_t len);
+int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
+                xc_meminfo_t *meminfo, uint32_t *distance);
+int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
+                   physdev_pci_device_t *devs, uint32_t *nodes);
+
+int xc_sched_id(xc_interface *xch,
+                int *sched_id);
+
+int xc_machphys_mfn_list(xc_interface *xch,
+                         unsigned long max_extents,
+                         xen_pfn_t *extent_start);
+
+typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
+int xc_getcpuinfo(xc_interface *xch, int max_cpus,
+                  xc_cpuinfo_t *info, int *nr_cpus); 
+
+int xc_domain_setmaxmem(xc_interface *xch,
+                        uint32_t domid,
+                        uint64_t max_memkb);
+
+int xc_domain_set_memmap_limit(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long map_limitkb);
+
+int xc_domain_setvnuma(xc_interface *xch,
+                        uint32_t domid,
+                        uint32_t nr_vnodes,
+                        uint32_t nr_regions,
+                        uint32_t nr_vcpus,
+                        xen_vmemrange_t *vmemrange,
+                        unsigned int *vdistance,
+                        unsigned int *vcpu_to_vnode,
+                        unsigned int *vnode_to_pnode);
+/*
+ * Retrieve vnuma configuration
+ * domid: IN, target domid
+ * nr_vnodes: IN/OUT, number of vnodes, not NULL
+ * nr_vmemranges: IN/OUT, number of vmemranges, not NULL
+ * nr_vcpus: IN/OUT, number of vcpus, not NULL
+ * vmemranges: OUT, an array which has length of nr_vmemranges
+ * vdistance: OUT, an array which has length of nr_vnodes * nr_vnodes
+ * vcpu_to_vnode: OUT, an array which has length of nr_vcpus
+ */
+int xc_domain_getvnuma(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t *nr_vnodes,
+                       uint32_t *nr_vmemranges,
+                       uint32_t *nr_vcpus,
+                       xen_vmemrange_t *vmemrange,
+                       unsigned int *vdistance,
+                       unsigned int *vcpu_to_vnode);
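+
+/*
+ * Illustrative usage sketch: retrieving the vNUMA layout once the
+ * expected sizes are known (e.g. from the values used at
+ * xc_domain_setvnuma() time).  Allocation checks are elided.
+ *
+ *   xen_vmemrange_t *vmemrange = calloc(nr_vmemranges, sizeof(*vmemrange));
+ *   unsigned int *vdistance = calloc(nr_vnodes * nr_vnodes,
+ *                                    sizeof(*vdistance));
+ *   unsigned int *vcpu_to_vnode = calloc(nr_vcpus, sizeof(*vcpu_to_vnode));
+ *
+ *   if ( xc_domain_getvnuma(xch, domid, &nr_vnodes, &nr_vmemranges,
+ *                           &nr_vcpus, vmemrange, vdistance,
+ *                           vcpu_to_vnode) < 0 )
+ *       perror("xc_domain_getvnuma");
+ */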
+
+int xc_domain_soft_reset(xc_interface *xch,
+                         uint32_t domid);
+
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * PC BIOS standard E820 types and structure.
+ */
+#define E820_RAM          1
+#define E820_RESERVED     2
+#define E820_ACPI         3
+#define E820_NVS          4
+#define E820_UNUSABLE     5
+
+#define E820MAX           (128)
+
+struct e820entry {
+    uint64_t addr;
+    uint64_t size;
+    uint32_t type;
+} __attribute__((packed));
+int xc_domain_set_memory_map(xc_interface *xch,
+                               uint32_t domid,
+                               struct e820entry entries[],
+                               uint32_t nr_entries);
+
+int xc_get_machine_memory_map(xc_interface *xch,
+                              struct e820entry entries[],
+                              uint32_t max_entries);
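+
+/*
+ * Illustrative usage sketch: handing a minimal two-entry memory map to a
+ * domain.  Addresses and sizes are arbitrary example values.
+ *
+ *   struct e820entry map[] = {
+ *       { .addr = 0x00000000, .size = 0x000a0000, .type = E820_RAM      },
+ *       { .addr = 0x000f0000, .size = 0x00010000, .type = E820_RESERVED },
+ *   };
+ *
+ *   if ( xc_domain_set_memory_map(xch, domid, map, 2) < 0 )
+ *       perror("xc_domain_set_memory_map");
+ */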
+#endif
+
+int xc_reserved_device_memory_map(xc_interface *xch,
+                                  uint32_t flags,
+                                  uint16_t seg,
+                                  uint8_t bus,
+                                  uint8_t devfn,
+                                  struct xen_reserved_device_memory entries[],
+                                  uint32_t *max_entries);
+int xc_domain_set_time_offset(xc_interface *xch,
+                              uint32_t domid,
+                              int32_t time_offset_seconds);
+
+int xc_domain_set_tsc_info(xc_interface *xch,
+                           uint32_t domid,
+                           uint32_t tsc_mode,
+                           uint64_t elapsed_nsec,
+                           uint32_t gtsc_khz,
+                           uint32_t incarnation);
+
+int xc_domain_get_tsc_info(xc_interface *xch,
+                           uint32_t domid,
+                           uint32_t *tsc_mode,
+                           uint64_t *elapsed_nsec,
+                           uint32_t *gtsc_khz,
+                           uint32_t *incarnation);
+
+int xc_domain_disable_migrate(xc_interface *xch, uint32_t domid);
+
+int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
+
+int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
+
+int xc_domain_increase_reservation(xc_interface *xch,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   unsigned int mem_flags,
+                                   xen_pfn_t *extent_start);
+
+int xc_domain_increase_reservation_exact(xc_interface *xch,
+                                         uint32_t domid,
+                                         unsigned long nr_extents,
+                                         unsigned int extent_order,
+                                         unsigned int mem_flags,
+                                         xen_pfn_t *extent_start);
+
+int xc_domain_decrease_reservation(xc_interface *xch,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   xen_pfn_t *extent_start);
+
+int xc_domain_decrease_reservation_exact(xc_interface *xch,
+                                         uint32_t domid,
+                                         unsigned long nr_extents,
+                                         unsigned int extent_order,
+                                         xen_pfn_t *extent_start);
+
+int xc_domain_add_to_physmap(xc_interface *xch,
+                             uint32_t domid,
+                             unsigned int space,
+                             unsigned long idx,
+                             xen_pfn_t gpfn);
+
+int xc_domain_add_to_physmap_batch(xc_interface *xch,
+                                   uint32_t domid,
+                                   uint32_t foreign_domid,
+                                   unsigned int space,
+                                   unsigned int size,
+                                   xen_ulong_t *idxs,
+                                   xen_pfn_t *gfpns,
+                                   int *errs);
+
+int xc_domain_remove_from_physmap(xc_interface *xch,
+                                  uint32_t domid,
+                                  xen_pfn_t gpfn);
+
+int xc_domain_populate_physmap(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long nr_extents,
+                               unsigned int extent_order,
+                               unsigned int mem_flags,
+                               xen_pfn_t *extent_start);
+
+int xc_domain_populate_physmap_exact(xc_interface *xch,
+                                     uint32_t domid,
+                                     unsigned long nr_extents,
+                                     unsigned int extent_order,
+                                     unsigned int mem_flags,
+                                     xen_pfn_t *extent_start);
+
+int xc_domain_claim_pages(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long nr_pages);
+
+int xc_domain_memory_exchange_pages(xc_interface *xch,
+                                    uint32_t domid,
+                                    unsigned long nr_in_extents,
+                                    unsigned int in_order,
+                                    xen_pfn_t *in_extents,
+                                    unsigned long nr_out_extents,
+                                    unsigned int out_order,
+                                    xen_pfn_t *out_extents);
+
+int xc_domain_set_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t target_pages,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries);
+
+int xc_domain_get_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries);
+
+int xc_domain_ioport_permission(xc_interface *xch,
+                                uint32_t domid,
+                                uint32_t first_port,
+                                uint32_t nr_ports,
+                                uint32_t allow_access);
+
+int xc_domain_irq_permission(xc_interface *xch,
+                             uint32_t domid,
+                             uint8_t pirq,
+                             uint8_t allow_access);
+
+int xc_domain_iomem_permission(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long first_mfn,
+                               unsigned long nr_mfns,
+                               uint8_t allow_access);
+
+unsigned long xc_make_page_below_4G(xc_interface *xch, uint32_t domid,
+                                    unsigned long mfn);
+
+typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
+typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
+int xc_perfc_reset(xc_interface *xch);
+int xc_perfc_query_number(xc_interface *xch,
+                          int *nbr_desc,
+                          int *nbr_val);
+int xc_perfc_query(xc_interface *xch,
+                   xc_hypercall_buffer_t *desc,
+                   xc_hypercall_buffer_t *val);
+
+typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
+int xc_lockprof_reset(xc_interface *xch);
+int xc_lockprof_query_number(xc_interface *xch,
+                             uint32_t *n_elems);
+int xc_lockprof_query(xc_interface *xch,
+                      uint32_t *n_elems,
+                      uint64_t *time,
+                      xc_hypercall_buffer_t *data);
+
+void *xc_memalign(xc_interface *xch, size_t alignment, size_t size);
+
+/**
+ * Avoid using this function, as it does not work for all cases (such
+ * as 4M superpages, or guests using PSE36). Only used for debugging.
+ *
+ * Translates a virtual address in the context of a given domain and
+ * vcpu returning the GFN containing the address (that is, an MFN for 
+ * PV guests, a PFN for HVM guests).  Returns 0 for failure.
+ *
+ * @parm xch a handle on an open hypervisor interface
+ * @parm dom the domain to perform the translation in
+ * @parm vcpu the vcpu to perform the translation on
+ * @parm virt the virtual address to translate
+ */
+unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
+                                           int vcpu, unsigned long long virt);
+
+
+int xc_copy_to_domain_page(xc_interface *xch, uint32_t domid,
+                           unsigned long dst_pfn, const char *src_page);
+
+int xc_clear_domain_pages(xc_interface *xch, uint32_t domid,
+                          unsigned long dst_pfn, int num);
+
+static inline int xc_clear_domain_page(xc_interface *xch, uint32_t domid,
+                                       unsigned long dst_pfn)
+{
+    return xc_clear_domain_pages(xch, domid, dst_pfn, 1);
+}
+
+int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
+                 uint32_t dom);
+
+/* System wide memory properties */
+int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn);
+
+/* Get current total pages allocated to a domain. */
+long xc_get_tot_pages(xc_interface *xch, uint32_t domid);
+
+/**
+ * This function retrieves the number of bytes available
+ * in the heap in a specific range of address-widths and nodes.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm min_width the smallest address width to query (0 if don't care)
+ * @parm max_width the largest address width to query (0 if don't care)
+ * @parm node the node to query (-1 for all)
+ * @parm bytes pointer to a caller variable receiving the total byte count
+ * @return 0 on success, <0 on failure.
+ */
+int xc_availheap(xc_interface *xch, int min_width, int max_width, int node,
+                 uint64_t *bytes);
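+
+/*
+ * Illustrative usage sketch: total free heap across all nodes and
+ * address widths.
+ *
+ *   uint64_t bytes;
+ *
+ *   if ( xc_availheap(xch, 0, 0, -1, &bytes) < 0 )
+ *       perror("xc_availheap");
+ */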
+
+/*
+ * Trace Buffer Operations
+ */
+
+/**
+ * xc_tbuf_enable - enable tracing buffers
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm pages size of tracing buffers to create (in pages)
+ * @parm mfn location to store mfn of the trace buffers to
+ * @parm size location to store the size (in bytes) of a trace buffer to
+ *
+ * Gets the machine address of the trace pointer area and the size of the
+ * per CPU buffers.
+ */
+int xc_tbuf_enable(xc_interface *xch, unsigned long pages,
+                   unsigned long *mfn, unsigned long *size);
+
+/*
+ * Disable tracing buffers.
+ */
+int xc_tbuf_disable(xc_interface *xch);
+
+/**
+ * This function sets the size of the trace buffers. Setting the size
+ * is currently a one-shot operation that may be performed either at boot
+ * time or via this interface, not both. The buffer size must be set before
+ * enabling tracing.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm size the size in pages per cpu for the trace buffers
+ * @return 0 on success, -1 on failure.
+ */
+int xc_tbuf_set_size(xc_interface *xch, unsigned long size);
+
+/**
+ * This function retrieves the current size of the trace buffers.
+ * Note that the size returned is in terms of bytes, not pages.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm size will contain the size in bytes for the trace buffers
+ * @return 0 on success, -1 on failure.
+ */
+int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);
+
+int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask);
+
+int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);
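+
+/*
+ * Illustrative usage sketch: size the trace buffers before enabling
+ * tracing, as required above.  16 pages per cpu is an arbitrary example
+ * value.
+ *
+ *   unsigned long t_mfn, t_size;
+ *
+ *   if ( xc_tbuf_set_size(xch, 16) < 0 ||
+ *        xc_tbuf_enable(xch, 16, &t_mfn, &t_size) < 0 )
+ *       perror("trace buffer setup");
+ */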
+
+int xc_domctl(xc_interface *xch, struct xen_domctl *domctl);
+int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl);
+
+int xc_version(xc_interface *xch, int cmd, void *arg);
+
+int xc_flask_op(xc_interface *xch, xen_flask_op_t *op);
+
+/*
+ * Subscribe to domain suspend via evtchn.
+ * Returns -1 on failure, in which case errno will be set appropriately.
+ * Just calls XEN_DOMCTL_subscribe - see the caveats for that domctl
+ * (in its doc comment in domctl.h).
+ */
+int xc_domain_subscribe_for_suspend(
+    xc_interface *xch, uint32_t domid, evtchn_port_t port);
+
+/**************************
+ * GRANT TABLE OPERATIONS *
+ **************************/
+
+/*
+ * These functions sometimes log messages as above, but not always.
+ */
+
+
+int xc_gnttab_op(xc_interface *xch, int cmd,
+                 void * op, int op_size, int count);
+/* Logs iff hypercall bounce fails, otherwise doesn't. */
+
+int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query);
+int xc_gnttab_get_version(xc_interface *xch, uint32_t domid); /* Never logs */
+grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid, int *gnt_num);
+grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid, int *gnt_num);
+/* Sometimes these don't set errno [fixme], and sometimes they don't log. */
+
+int xc_physdev_map_pirq(xc_interface *xch,
+                        uint32_t domid,
+                        int index,
+                        int *pirq);
+
+int xc_physdev_map_pirq_msi(xc_interface *xch,
+                            uint32_t domid,
+                            int index,
+                            int *pirq,
+                            int devfn,
+                            int bus,
+                            int entry_nr,
+                            uint64_t table_base);
+
+int xc_physdev_unmap_pirq(xc_interface *xch,
+                          uint32_t domid,
+                          int pirq);
+
+/*
+ *  LOGGING AND ERROR REPORTING
+ */
+
+
+#define XC_MAX_ERROR_MSG_LEN 1024
+typedef struct xc_error {
+  enum xc_error_code code;
+  char message[XC_MAX_ERROR_MSG_LEN];
+} xc_error;
+
+
+/*
+ * Convert an error code or level into a text description.  Return values
+ * are pointers to fixed strings and do not need to be freed.
+ * Do not fail, but return pointers to generic strings if fed bogus input.
+ */
+const char *xc_error_code_to_desc(int code);
+
+/*
+ * Convert an errno value to a text description.
+ */
+const char *xc_strerror(xc_interface *xch, int errcode);
+
+
+/*
+ * Return a pointer to the last error with level XC_REPORT_ERROR. This
+ * pointer and the data pointed to are only valid until the next call
+ * to libxc in the same thread.
+ */
+const xc_error *xc_get_last_error(xc_interface *handle);
+
+/*
+ * Clear the last error
+ */
+void xc_clear_last_error(xc_interface *xch);
+
+int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value);
+int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value);
+
+/* Deprecated: use xc_hvm_param_set/get() instead. */
+int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value);
+int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value);
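+
+/*
+ * Illustrative usage sketch: reading and writing an HVM param with the
+ * non-deprecated interface.  HVM_PARAM_STORE_EVTCHN comes from the
+ * public hvm params header.
+ *
+ *   uint64_t val;
+ *
+ *   if ( xc_hvm_param_get(xch, domid, HVM_PARAM_STORE_EVTCHN, &val) == 0 )
+ *       xc_hvm_param_set(xch, domid, HVM_PARAM_STORE_EVTCHN, val);
+ */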
+
+/* HVM guest pass-through */
+int xc_assign_device(xc_interface *xch,
+                     uint32_t domid,
+                     uint32_t machine_sbdf,
+                     uint32_t flag);
+
+int xc_get_device_group(xc_interface *xch,
+                     uint32_t domid,
+                     uint32_t machine_sbdf,
+                     uint32_t max_sdevs,
+                     uint32_t *num_sdevs,
+                     uint32_t *sdev_array);
+
+int xc_test_assign_device(xc_interface *xch,
+                          uint32_t domid,
+                          uint32_t machine_sbdf);
+
+int xc_deassign_device(xc_interface *xch,
+                     uint32_t domid,
+                     uint32_t machine_sbdf);
+
+int xc_assign_dt_device(xc_interface *xch,
+                        uint32_t domid,
+                        char *path);
+int xc_test_assign_dt_device(xc_interface *xch,
+                             uint32_t domid,
+                             char *path);
+int xc_deassign_dt_device(xc_interface *xch,
+                          uint32_t domid,
+                          char *path);
+
+int xc_domain_memory_mapping(xc_interface *xch,
+                             uint32_t domid,
+                             unsigned long first_gfn,
+                             unsigned long first_mfn,
+                             unsigned long nr_mfns,
+                             uint32_t add_mapping);
+
+int xc_domain_ioport_mapping(xc_interface *xch,
+                             uint32_t domid,
+                             uint32_t first_gport,
+                             uint32_t first_mport,
+                             uint32_t nr_ports,
+                             uint32_t add_mapping);
+
+int xc_domain_update_msi_irq(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t gvec,
+    uint32_t pirq,
+    uint32_t gflags,
+    uint64_t gtable);
+
+int xc_domain_unbind_msi_irq(xc_interface *xch,
+                             uint32_t domid,
+                             uint32_t gvec,
+                             uint32_t pirq,
+                             uint32_t gflags);
+
+int xc_domain_bind_pt_irq(xc_interface *xch,
+                          uint32_t domid,
+                          uint8_t machine_irq,
+                          uint8_t irq_type,
+                          uint8_t bus,
+                          uint8_t device,
+                          uint8_t intx,
+                          uint8_t isa_irq);
+
+int xc_domain_unbind_pt_irq(xc_interface *xch,
+                          uint32_t domid,
+                          uint8_t machine_irq,
+                          uint8_t irq_type,
+                          uint8_t bus,
+                          uint8_t device,
+                          uint8_t intx,
+                          uint8_t isa_irq);
+
+int xc_domain_bind_pt_pci_irq(xc_interface *xch,
+                              uint32_t domid,
+                              uint8_t machine_irq,
+                              uint8_t bus,
+                              uint8_t device,
+                              uint8_t intx);
+
+int xc_domain_bind_pt_isa_irq(xc_interface *xch,
+                              uint32_t domid,
+                              uint8_t machine_irq);
+
+int xc_domain_bind_pt_spi_irq(xc_interface *xch,
+                              uint32_t domid,
+                              uint16_t vspi,
+                              uint16_t spi);
+
+int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
+                                uint32_t domid,
+                                uint16_t vspi,
+                                uint16_t spi);
+
+/* Set the target domain */
+int xc_domain_set_target(xc_interface *xch,
+                         uint32_t domid,
+                         uint32_t target);
+
+/* Control the domain for debug */
+int xc_domain_debug_control(xc_interface *xch,
+                            uint32_t domid,
+                            uint32_t sop,
+                            uint32_t vcpu);
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * CPUID policy data, expressed in the legacy XEND format.
+ *
+ * Policy is an array of strings, 32 chars long:
+ *   policy[0] = eax
+ *   policy[1] = ebx
+ *   policy[2] = ecx
+ *   policy[3] = edx
+ *
+ * The format of the string is the following:
+ *   '1' -> force to 1
+ *   '0' -> force to 0
+ *   'x' -> we don't care (use default)
+ *   'k' -> pass through host value
+ *   's' -> legacy alias for 'k'
+ */
+struct xc_xend_cpuid {
+    union {
+        struct {
+            uint32_t leaf, subleaf;
+        };
+        uint32_t input[2];
+    };
+    char *policy[4];
+};
+
+/*
+ * Make adjustments to the CPUID settings for a domain.
+ *
+ * This path is used in two cases.  First, for fresh boots of the domain, and
+ * secondly for migrate-in/restore of pre-4.14 guests (where CPUID data was
+ * missing from the stream).  The @restore parameter distinguishes these
+ * cases, and the generated policy must be compatible with Xen 4.13.
+ *
+ * Either pass a full new @featureset (and @nr_features), or adjust individual
+ * features (@pae).
+ *
+ * Then (optionally) apply legacy XEND overrides (@xend) to the result.
+ */
+int xc_cpuid_apply_policy(xc_interface *xch,
+                          uint32_t domid, bool restore,
+                          const uint32_t *featureset,
+                          unsigned int nr_features, bool pae,
+                          const struct xc_xend_cpuid *xend);
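+
+/*
+ * Illustrative usage sketch: a single XEND-format override entry for
+ * leaf 0x1, forcing one ecx bit to 0, passing a neighbouring bit through
+ * from the host ('k') and leaving the rest at their defaults ('x').
+ * Which string position maps to which bit, how the entry array is
+ * terminated before being passed as the xend argument, and the use of
+ * ~0u for an unused subleaf are conventions assumed for this example.
+ *
+ *   struct xc_xend_cpuid ent = {
+ *       .leaf = 1, .subleaf = ~0u,
+ *       .policy = { NULL, NULL,
+ *                   "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxk0", NULL },
+ *   };
+ */
+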
+int xc_mca_op(xc_interface *xch, struct xen_mc *mc);
+int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
+                        xc_cpumap_t cpumap, unsigned int nr_cpus);
+#endif
+
+struct xc_px_val {
+    uint64_t freq;        /* Px core frequency */
+    uint64_t residency;   /* Px residency time */
+    uint64_t count;       /* Px transition count */
+};
+
+struct xc_px_stat {
+    uint8_t total;        /* total Px states */
+    uint8_t usable;       /* usable Px states */
+    uint8_t last;         /* last Px state */
+    uint8_t cur;          /* current Px state */
+    uint64_t *trans_pt;   /* Px transition table */
+    struct xc_px_val *pt;
+};
+
+int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px);
+int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt);
+int xc_pm_reset_pxstat(xc_interface *xch, int cpuid);
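+
+/*
+ * Illustrative usage sketch: querying Px statistics for cpu 0.  The
+ * caller is assumed to provide the trans_pt/pt arrays; sizing them from
+ * max_px as below assumes a max_px x max_px transition table.
+ *
+ *   int max_px;
+ *   struct xc_px_stat pxstat = {};
+ *
+ *   if ( xc_pm_get_max_px(xch, 0, &max_px) == 0 )
+ *   {
+ *       pxstat.trans_pt = calloc(max_px * max_px, sizeof(uint64_t));
+ *       pxstat.pt = calloc(max_px, sizeof(struct xc_px_val));
+ *       if ( xc_pm_get_pxstat(xch, 0, &pxstat) < 0 )
+ *           perror("xc_pm_get_pxstat");
+ *   }
+ */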
+
+struct xc_cx_stat {
+    uint32_t nr;           /* entry nr in triggers[]/residencies[], incl C0 */
+    uint32_t last;         /* last Cx state */
+    uint64_t idle_time;    /* idle time from boot */
+    uint64_t *triggers;    /* Cx trigger counts */
+    uint64_t *residencies; /* Cx residencies */
+    uint32_t nr_pc;        /* entry nr in pc[] */
+    uint32_t nr_cc;        /* entry nr in cc[] */
+    uint64_t *pc;          /* 1-biased indexing (i.e. excl C0) */
+    uint64_t *cc;          /* 1-biased indexing (i.e. excl C0) */
+};
+typedef struct xc_cx_stat xc_cx_stat_t;
+
+int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx);
+int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt);
+int xc_pm_reset_cxstat(xc_interface *xch, int cpuid);
+
+int xc_cpu_online(xc_interface *xch, int cpu);
+int xc_cpu_offline(xc_interface *xch, int cpu);
+int xc_smt_enable(xc_interface *xch);
+int xc_smt_disable(xc_interface *xch);
+
+/*
+ * The cpufreq parameter names in this structure match the
+ * corresponding sysfs file names in native Linux.
+ */
+typedef struct xen_userspace xc_userspace_t;
+typedef struct xen_ondemand xc_ondemand_t;
+
+struct xc_get_cpufreq_para {
+    /* IN/OUT variable */
+    uint32_t cpu_num;
+    uint32_t freq_num;
+    uint32_t gov_num;
+
+    /* for all governors */
+    /* OUT variable */
+    uint32_t *affected_cpus;
+    uint32_t *scaling_available_frequencies;
+    char     *scaling_available_governors;
+    char scaling_driver[CPUFREQ_NAME_LEN];
+
+    uint32_t cpuinfo_cur_freq;
+    uint32_t cpuinfo_max_freq;
+    uint32_t cpuinfo_min_freq;
+    uint32_t scaling_cur_freq;
+
+    char scaling_governor[CPUFREQ_NAME_LEN];
+    uint32_t scaling_max_freq;
+    uint32_t scaling_min_freq;
+
+    /* for specific governor */
+    union {
+        xc_userspace_t userspace;
+        xc_ondemand_t ondemand;
+    } u;
+
+    int32_t turbo_enabled;
+};
+
+int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
+                        struct xc_get_cpufreq_para *user_para);
+int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname);
+int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
+                        int ctrl_type, int ctrl_value);
+int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq);
+
+int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value);
+
+int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value);
+int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value);
+
+int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value);
+int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value);
+
+int xc_enable_turbo(xc_interface *xch, int cpuid);
+int xc_disable_turbo(xc_interface *xch, int cpuid);
+
+/**
+ * altp2m operations
+ */
+
+int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state);
+int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state);
+int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
+                                     uint32_t vcpuid, xen_pfn_t gfn);
+int xc_altp2m_set_vcpu_disable_notify(xc_interface *handle, uint32_t domid,
+                                      uint32_t vcpuid);
+int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
+                          xenmem_access_t default_access, uint16_t *view_id);
+int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
+                           uint16_t view_id);
+/* Switch all vCPUs of the domain to the specified altp2m view */
+int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
+                             uint16_t view_id);
+int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
+                              uint16_t view_id, xen_pfn_t gfn, bool sve);
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+                                   uint16_t view_id, xen_pfn_t first_gfn,
+                                   xen_pfn_t last_gfn, bool sve,
+                                   xen_pfn_t *error_gfn, int32_t *error_code);
+int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
+                              uint16_t view_id, xen_pfn_t gfn, bool *sve);
+int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
+                             uint16_t view_id, xen_pfn_t gfn,
+                             xenmem_access_t access);
+int xc_altp2m_set_mem_access_multi(xc_interface *handle, uint32_t domid,
+                                   uint16_t view_id, uint8_t *access,
+                                   uint64_t *gfns, uint32_t nr);
+int xc_altp2m_get_mem_access(xc_interface *handle, uint32_t domid,
+                             uint16_t view_id, xen_pfn_t gfn,
+                             xenmem_access_t *access);
+int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
+                         uint16_t view_id, xen_pfn_t old_gfn,
+                         xen_pfn_t new_gfn);
+int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
+                               uint32_t vcpuid, uint16_t *p2midx);
+/*
+ * Set view visibility for xc_altp2m_switch_to_view and vmfunc.
+ * Note: If altp2m mode is set to mixed the guest is able to change the view
+ * visibility and then call vmfunc.
+ */
+int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
+                             uint16_t view_id, bool visible);
+
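+
+/*
+ * Illustrative usage sketch: enabling altp2m for a domain, creating a
+ * restricted view and switching all vCPUs to it.  Error handling elided.
+ *
+ *   uint16_t view_id;
+ *
+ *   xc_altp2m_set_domain_state(xch, domid, true);
+ *   xc_altp2m_create_view(xch, domid, XENMEM_access_rx, &view_id);
+ *   xc_altp2m_switch_to_view(xch, domid, view_id);
+ */
+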
+/** 
+ * Mem paging operations.
+ * Paging is supported only on the x86 architecture in 64 bit mode, with
+ * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
+ * support is considered experimental.
+ */
+int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
+int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
+int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
+                           uint64_t gfn);
+int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
+int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
+int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
+                       uint64_t gfn, void *buffer);
+
+/** 
+ * Access tracking operations.
+ * Supported only on Intel EPT 64 bit processors.
+ */
+
+/*
+ * Set a range of memory to a specific access.
+ * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
+ * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
+ */
+int xc_set_mem_access(xc_interface *xch, uint32_t domain_id,
+                      xenmem_access_t access, uint64_t first_pfn,
+                      uint32_t nr);
+
+/*
+ * Set an array of pages to their respective access in the access array.
+ * The nr parameter specifies the size of the pages and access arrays.
+ * The same allowed access types as for xc_set_mem_access() apply.
+ */
+int xc_set_mem_access_multi(xc_interface *xch, uint32_t domain_id,
+                            uint8_t *access, uint64_t *pages,
+                            uint32_t nr);
+
+/*
+ * Gets the mem access for the given page (returned in access on success)
+ */
+int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
+                      uint64_t pfn, xenmem_access_t *access);
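+
+/*
+ * Illustrative usage sketch: revoking write access on a range of pages
+ * and reading one entry back.  first_pfn and nr are example values.
+ *
+ *   uint64_t first_pfn = 0x1000;
+ *   uint32_t nr = 16;
+ *   xenmem_access_t access;
+ *
+ *   if ( xc_set_mem_access(xch, domid, XENMEM_access_rx, first_pfn, nr) < 0 )
+ *       perror("xc_set_mem_access");
+ *   if ( xc_get_mem_access(xch, domid, first_pfn, &access) < 0 )
+ *       perror("xc_get_mem_access");
+ */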
+
+/*
+ * Returns the VM_EVENT_INTERFACE version.
+ */
+int xc_vm_event_get_version(xc_interface *xch);
+
+/***
+ * Monitor control operations.
+ *
+ * Enables the VM event monitor ring and returns the mapped ring page.
+ * This ring is used to deliver mem_access events, as well as a set of additional
+ * events that can be enabled with the xc_monitor_* functions.
+ *
+ * Will return NULL on error.
+ * Caller has to unmap this page when done.
+ */
+void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
+int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
+/*
+ * Get a bitmap of supported monitor events in the form
+ * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
+ */
+int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
+                                uint32_t *capabilities);
+int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
+                             uint16_t index, bool enable, bool sync,
+                             uint64_t bitmask, bool onchangeonly);
+/*
+ * A list of MSR indices can usually be found in /usr/include/asm/msr-index.h.
+ * Please consult the Intel/AMD manuals for more information on
+ * non-architectural indices.
+ */
+int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
+                          bool enable, bool onchangeonly);
+int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id, bool enable);
+int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
+                                   bool enable);
+int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
+                                 bool enable);
+int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id,
+                             bool enable, bool sync, bool allow_userspace);
+/*
+ * Disables page-walk mem_access events by emulating. If the
+ * emulation cannot be performed then a VM_EVENT_REASON_EMUL_UNIMPLEMENTED
+ * event will be issued.
+ */
+int xc_monitor_inguest_pagefault(xc_interface *xch, uint32_t domain_id,
+                                 bool disable);
+int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
+                                bool enable, bool sync);
+int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable);
+int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
+                               bool enable);
+int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
+                                  bool enable);
+/**
+ * This function enables / disables emulation for each REP for a
+ * REP-compatible instruction.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domain_id the domain id to operate on.
+ * @parm enable if 0 optimize when possible, else emulate each REP.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
+                                bool enable);
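+
+/*
+ * Illustrative usage sketch: setting up the monitor ring and asking for
+ * singlestep events.  Consuming the ring via the vm_event interface is
+ * out of scope here.
+ *
+ *   uint32_t port;
+ *   void *ring_page = xc_monitor_enable(xch, domid, &port);
+ *
+ *   if ( !ring_page )
+ *       perror("xc_monitor_enable");
+ *   else
+ *       xc_monitor_singlestep(xch, domid, true);
+ */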
+
+/***
+ * Memory sharing operations.
+ *
+ * Unless otherwise noted, these calls return 0 on success, or -1 and errno on
+ * failure.
+ *
+ * Sharing is supported only on the x86 architecture in 64 bit mode, with
+ * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
+ * support is considered experimental.
+ *
+ * Calls below return ENOSYS if not in the x86_64 architecture.
+ * Calls below return ENODEV if the domain does not support HAP.
+ * Calls below return ESRCH if the specified domain does not exist.
+ * Calls below return EPERM if the caller is unprivileged for this domain.
+ */
+
+/* Turn on/off sharing for the domid, depending on the enable flag.
+ *
+ * Returns EXDEV if trying to enable and the domain has had a PCI device
+ * assigned for passthrough (these two features are mutually exclusive).
+ *
+ * When sharing for a domain is turned off, the domain may still reference
+ * shared pages. Unsharing happens lazily. */
+int xc_memshr_control(xc_interface *xch,
+                      uint32_t domid,
+                      int enable);
+
+/* Create a communication ring in which the hypervisor will place ENOMEM
+ * notifications.
+ *
+ * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
+ * allocated, and thus an out-of-memory error can occur.
+ *
+ * For complete examples on how to plumb a notification ring, look into
+ * xenpaging or xen-access.
+ *
+ * On receipt of a notification, the helper should ensure there is memory
+ * available to the domain before retrying.
+ *
+ * If a domain encounters an ENOMEM condition when sharing and this ring
+ * has not been set up, the hypervisor will crash the domain.
+ *
+ * Fails with:
+ *  EINVAL if port is NULL
+ *  EINVAL if the sharing ring has already been enabled
+ *  ENOSYS if no guest gfn has been specified to host the ring via an hvm param
+ *  EINVAL if the gfn for the ring has not been populated
+ *  ENOENT if the gfn for the ring is paged out, or cannot be unshared
+ *  EINVAL if the gfn for the ring cannot be written to
+ *  EINVAL if the domain is dying
+ *  ENOSPC if an event channel cannot be allocated for the ring
+ *  ENOMEM if memory cannot be allocated for internal data structures
+ *  EINVAL or EACCES if the request is denied by the security policy
+ */
+
+int xc_memshr_ring_enable(xc_interface *xch,
+                          uint32_t domid,
+                          uint32_t *port);
+/* Disable the ring for ENOMEM communication.
+ * May fail with EINVAL if the ring was not enabled in the first place.
+ */
+int xc_memshr_ring_disable(xc_interface *xch,
+                           uint32_t domid);
+
+/*
+ * Calls below return EINVAL if sharing has not been enabled for the domain
+ * Calls below return EINVAL if the domain is dying
+ */
+/* Once a response to an ENOMEM notification is prepared, the tool can
+ * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
+ * event channel kick and/or this call. */
+int xc_memshr_domain_resume(xc_interface *xch,
+                            uint32_t domid);
+
+/* Select a page for sharing.
+ *
+ * A 64 bit opaque handle will be stored in handle.  The hypervisor ensures
+ * that if the page is modified, the handle will be invalidated, and future
+ * users of it will fail. If the page has already been selected and is still
+ * associated to a valid handle, the existing handle will be returned.
+ *
+ * May fail with:
+ *  EINVAL if the gfn is not populated or not sharable (mmio, etc)
+ *  ENOMEM if internal data structures cannot be allocated
+ *  E2BIG if the page is being referenced by other subsystems (e.g. qemu)
+ *  ENOENT or EEXIST if there are internal hypervisor errors.
+ */
+int xc_memshr_nominate_gfn(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned long gfn,
+                           uint64_t *handle);
+/* Same as above, but instead of a guest frame number, the input is a grant
+ * reference provided by the guest.
+ *
+ * May fail with EINVAL if the grant reference is invalid.
+ */
+int xc_memshr_nominate_gref(xc_interface *xch,
+                            uint32_t domid,
+                            grant_ref_t gref,
+                            uint64_t *handle);
+
+/* The three calls below may fail with
+ * 10 (or -XENMEM_SHARING_OP_S_HANDLE_INVALID) if the handle passed as source
+ * is invalid.
+ * 9 (or -XENMEM_SHARING_OP_C_HANDLE_INVALID) if the handle passed as client is
+ * invalid.
+ */
+/* Share two nominated guest pages.
+ *
+ * If the call succeeds, both pages will point to the same backing frame (or
+ * mfn). The hypervisor will verify the handles are still valid, but it will
+ * not perform any sanity checking on the contents of the pages (the selection
+ * mechanism for sharing candidates is entirely up to the user-space tool).
+ *
+ * After successful sharing, the client handle becomes invalid. Both <domain,
+ * gfn> tuples point to the same mfn with the same handle, the one specified as
+ * source. Either 3-tuple can be specified later for further re-sharing.
+ */
+int xc_memshr_share_gfns(xc_interface *xch,
+                    uint32_t source_domain,
+                    unsigned long source_gfn,
+                    uint64_t source_handle,
+                    uint32_t client_domain,
+                    unsigned long client_gfn,
+                    uint64_t client_handle);
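+
+/*
+ * Illustrative usage sketch: sharing one gfn of 'domA' with one gfn of
+ * 'domB'.  The domain ids and gfns are example names, and both domains
+ * are assumed to already have sharing enabled via xc_memshr_control().
+ *
+ *   uint64_t h_src, h_cli;
+ *
+ *   xc_memshr_nominate_gfn(xch, domA, gfnA, &h_src);
+ *   xc_memshr_nominate_gfn(xch, domB, gfnB, &h_cli);
+ *   if ( xc_memshr_share_gfns(xch, domA, gfnA, h_src,
+ *                             domB, gfnB, h_cli) )
+ *       perror("xc_memshr_share_gfns");
+ */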
+
+/* Same as above, but share two grant references instead.
+ *
+ * May fail with EINVAL if either grant reference is invalid.
+ */
+int xc_memshr_share_grefs(xc_interface *xch,
+                    uint32_t source_domain,
+                    grant_ref_t source_gref,
+                    uint64_t source_handle,
+                    uint32_t client_domain,
+                    grant_ref_t client_gref,
+                    uint64_t client_handle);
+
+/* Directly adds a shared frame to the guest physmap of the client
+ * domain.
+ *
+ * May additionally fail with
+ *  9 (-XENMEM_SHARING_OP_C_HANDLE_INVALID) if the physmap entry for the gfn is
+ *  not suitable.
+ *  ENOMEM if internal data structures cannot be allocated.
+ *  ENOENT if there is an internal hypervisor error.
+ */
+int xc_memshr_add_to_physmap(xc_interface *xch,
+                    uint32_t source_domain,
+                    unsigned long source_gfn,
+                    uint64_t source_handle,
+                    uint32_t client_domain,
+                    unsigned long client_gfn);
+
+/* Deduplicates a range of memory of a client domain. Using
+ * this function is equivalent to calling xc_memshr_nominate_gfn for each gfn
+ * in the two domains, followed by xc_memshr_share_gfns.
+ *
+ * May fail with -EINVAL if the source and client domain have different
+ * memory size or if memory sharing is not enabled on either of the domains.
+ * May also fail with -ENOMEM if there isn't enough memory available to store
+ * the sharing metadata before deduplication can happen.
+ */
+int xc_memshr_range_share(xc_interface *xch,
+                          uint32_t source_domain,
+                          uint32_t client_domain,
+                          uint64_t first_gfn,
+                          uint64_t last_gfn);
+
+int xc_memshr_fork(xc_interface *xch,
+                   uint32_t source_domain,
+                   uint32_t client_domain,
+                   bool allow_with_iommu,
+                   bool block_interrupts);
+
+/*
+ * Note: this function is only intended to be used on short-lived forks that
+ * haven't yet acquired a lot of memory. In case the fork has a lot of memory
+ * it is likely more performant to create a new fork with xc_memshr_fork.
+ *
+ * With VMs that have a lot of memory this call may block for a long time.
+ */
+int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain);
+
+/* Debug calls: return the number of pages referencing the shared frame backing
+ * the input argument. Should be one or greater.
+ *
+ * May fail with EINVAL if there is no backing shared frame for the input
+ * argument.
+ */
+int xc_memshr_debug_gfn(xc_interface *xch,
+                        uint32_t domid,
+                        unsigned long gfn);
+/* May additionally fail with EINVAL if the grant reference is invalid. */
+int xc_memshr_debug_gref(xc_interface *xch,
+                         uint32_t domid,
+                         grant_ref_t gref);
+
+/* Audits the share subsystem.
+ *
+ * Returns ENOSYS if not supported (may not be compiled into the hypervisor).
+ *
+ * Returns the number of errors found during auditing otherwise. May be (should
+ * be!) zero.
+ *
+ * If debugtrace support has been compiled into the hypervisor and is enabled,
+ * verbose descriptions for the errors are available in the hypervisor console.
+ */
+int xc_memshr_audit(xc_interface *xch);
+
+/* Stats reporting.
+ *
+ * At any point in time, the following equality should hold for a host:
+ *
+ *  Let dominfo(d) be the xc_dominfo_t struct filled by a call to
+ *  xc_domain_getinfo(d)
+ *
+ *  The summation of dominfo(d)->shr_pages for all domains in the system
+ *      should be equal to
+ *  xc_sharing_freed_pages + xc_sharing_used_frames
+ */
+/*
+ * This function returns the total number of pages freed by using sharing
+ * on the system.  For example, if two domains contain a single entry in
+ * their p2m table that points to the same shared page (and no other pages
+ * in the system are shared), then this function should return 1.
+ */
+long xc_sharing_freed_pages(xc_interface *xch);
+
+/*
+ * This function returns the total number of frames occupied by shared
+ * pages on the system.  This is independent of the number of domains
+ * pointing at these frames.  For example, in the above scenario this
+ * should return 1. (And dominfo(d) for each of the two domains should return 1
+ * as well).
+ *
+ * Note that some of these sharing_used_frames may be referenced by
+ * a single domain page, and thus not realize any savings. The same
+ * applies to some of the pages counted in dominfo(d)->shr_pages.
+ */
+long xc_sharing_used_frames(xc_interface *xch);
+/*** End sharing interface ***/
+
+int xc_flask_load(xc_interface *xc_handle, char *buf, uint32_t size);
+int xc_flask_context_to_sid(xc_interface *xc_handle, char *buf, uint32_t size, uint32_t *sid);
+int xc_flask_sid_to_context(xc_interface *xc_handle, int sid, char *buf, uint32_t size);
+int xc_flask_getenforce(xc_interface *xc_handle);
+int xc_flask_setenforce(xc_interface *xc_handle, int mode);
+int xc_flask_getbool_byid(xc_interface *xc_handle, int id, char *name, uint32_t size, int *curr, int *pend);
+int xc_flask_getbool_byname(xc_interface *xc_handle, char *name, int *curr, int *pend);
+int xc_flask_setbool(xc_interface *xc_handle, char *name, int value, int commit);
+int xc_flask_add_pirq(xc_interface *xc_handle, unsigned int pirq, char *scontext);
+int xc_flask_add_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high,
+                      char *scontext);
+int xc_flask_add_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high,
+                     char *scontext);
+int xc_flask_add_device(xc_interface *xc_handle, unsigned long device, char *scontext);
+int xc_flask_del_pirq(xc_interface *xc_handle, unsigned int pirq);
+int xc_flask_del_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high);
+int xc_flask_del_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high);
+int xc_flask_del_device(xc_interface *xc_handle, unsigned long device);
+int xc_flask_access(xc_interface *xc_handle, const char *scon, const char *tcon,
+                  uint16_t tclass, uint32_t req,
+                  uint32_t *allowed, uint32_t *decided,
+                  uint32_t *auditallow, uint32_t *auditdeny,
+                  uint32_t *seqno);
+int xc_flask_avc_cachestats(xc_interface *xc_handle, char *buf, int size);
+int xc_flask_policyvers(xc_interface *xc_handle);
+int xc_flask_avc_hashstats(xc_interface *xc_handle, char *buf, int size);
+int xc_flask_getavc_threshold(xc_interface *xc_handle);
+int xc_flask_setavc_threshold(xc_interface *xc_handle, int threshold);
+int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid);
+
+struct elf_binary;
+void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
+                        int verbose);
+/* Useful for callers who also use libelf. */
+
+/*
+ * Execute an image previously loaded with xc_kexec_load().
+ *
+ * Does not return on success.
+ *
+ * Fails with:
+ *   ENOENT if the specified image has not been loaded.
+ */
+int xc_kexec_exec(xc_interface *xch, int type);
+
+/*
+ * Find the machine address and size of certain memory areas.
+ *
+ *   KEXEC_RANGE_MA_CRASH       crash area
+ *   KEXEC_RANGE_MA_XEN         Xen itself
+ *   KEXEC_RANGE_MA_CPU         CPU note for CPU number 'nr'
+ *   KEXEC_RANGE_MA_XENHEAP     xenheap
+ *   KEXEC_RANGE_MA_EFI_MEMMAP  EFI Memory Map
+ *   KEXEC_RANGE_MA_VMCOREINFO  vmcoreinfo
+ *
+ * Fails with:
+ *   EINVAL if the range or CPU number isn't valid.
+ */
+int xc_kexec_get_range(xc_interface *xch, int range,  int nr,
+                       uint64_t *size, uint64_t *start);
+
+/*
+ * Load a kexec image into memory.
+ *
+ * The image may be of type KEXEC_TYPE_DEFAULT (executed on request)
+ * or KEXEC_TYPE_CRASH (executed on a crash).
+ *
+ * The image architecture may be a 32-bit variant of the hypervisor
+ * architecture (e.g, EM_386 on a x86-64 hypervisor).
+ *
+ * Fails with:
+ *   ENOMEM if there is insufficient memory for the new image.
+ *   EINVAL if the image does not fit into the crash area or the entry
+ *          point isn't within one of segments.
+ *   EBUSY  if another image is being executed.
+ */
+int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
+                  uint64_t entry_maddr,
+                  uint32_t nr_segments, xen_kexec_segment_t *segments);
+
+/*
+ * Unload a kexec image.
+ *
+ * This prevents a KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH image from
+ * being executed.  The crash images are not cleared from the crash
+ * region.
+ */
+int xc_kexec_unload(xc_interface *xch, int type);
+
+/*
+ * Find out whether the image has been successfully loaded.
+ *
+ * The type can be either KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
+ * If zero is returned, that means no image is loaded for the type.
+ * If one is returned, that means an image is loaded for the type.
+ * Otherwise, a negative return value indicates an error.
+ */
+int xc_kexec_status(xc_interface *xch, int type);
+
+typedef xenpf_resource_entry_t xc_resource_entry_t;
+
+/*
+ * Generic resource operation which contains multiple non-preemptible
+ * resource access entries that are passed to xc_resource_op().
+ */
+struct xc_resource_op {
+    uint64_t result;        /* on return, check this field first */
+    uint32_t cpu;           /* which cpu to run */
+    uint32_t nr_entries;    /* number of resource entries */
+    xc_resource_entry_t *entries;
+};
+
+typedef struct xc_resource_op xc_resource_op_t;
+int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops);
+
+#if defined(__i386__) || defined(__x86_64__)
+enum xc_psr_cmt_type {
+    XC_PSR_CMT_L3_OCCUPANCY,
+    XC_PSR_CMT_TOTAL_MEM_COUNT,
+    XC_PSR_CMT_LOCAL_MEM_COUNT,
+};
+typedef enum xc_psr_cmt_type xc_psr_cmt_type;
+
+enum xc_psr_type {
+    XC_PSR_CAT_L3_CBM      = 1,
+    XC_PSR_CAT_L3_CBM_CODE = 2,
+    XC_PSR_CAT_L3_CBM_DATA = 3,
+    XC_PSR_CAT_L2_CBM      = 4,
+    XC_PSR_MBA_THRTL       = 5,
+};
+typedef enum xc_psr_type xc_psr_type;
+
+enum xc_psr_feat_type {
+    XC_PSR_CAT_L3,
+    XC_PSR_CAT_L2,
+    XC_PSR_MBA,
+};
+typedef enum xc_psr_feat_type xc_psr_feat_type;
+
+union xc_psr_hw_info {
+    struct {
+        uint32_t cos_max;
+        uint32_t cbm_len;
+        bool     cdp_enabled;
+    } cat;
+
+    struct {
+        uint32_t cos_max;
+        uint32_t thrtl_max;
+        bool     linear;
+    } mba;
+};
+typedef union xc_psr_hw_info xc_psr_hw_info;
+
+int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid);
+int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid);
+int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
+                               uint32_t *rmid);
+int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid);
+int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
+                                       uint32_t *upscaling_factor);
+int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask);
+int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
+                                 uint32_t *l3_cache_size);
+int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
+                        uint32_t psr_cmt_type, uint64_t *monitor_data,
+                        uint64_t *tsc);
+int xc_psr_cmt_enabled(xc_interface *xch);
+
+int xc_psr_set_domain_data(xc_interface *xch, uint32_t domid,
+                           xc_psr_type type, uint32_t target,
+                           uint64_t data);
+int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
+                           xc_psr_type type, uint32_t target,
+                           uint64_t *data);
+int xc_psr_get_hw_info(xc_interface *xch, uint32_t socket,
+                       xc_psr_feat_type type, xc_psr_hw_info *hw_info);
+
+int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps);
+int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
+                          uint32_t *nr_features, uint32_t *featureset);
+
+int xc_get_cpu_policy_size(xc_interface *xch, uint32_t *nr_leaves,
+                           uint32_t *nr_msrs);
+int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
+                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
+                             uint32_t *nr_msrs, xen_msr_entry_t *msrs);
+int xc_get_domain_cpu_policy(xc_interface *xch, uint32_t domid,
+                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
+                             uint32_t *nr_msrs, xen_msr_entry_t *msrs);
+int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
+                             uint32_t nr_leaves, xen_cpuid_leaf_t *leaves,
+                             uint32_t nr_msrs, xen_msr_entry_t *msrs,
+                             uint32_t *err_leaf_p, uint32_t *err_subleaf_p,
+                             uint32_t *err_msr_p);
+
+uint32_t xc_get_cpu_featureset_size(void);
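+
+/*
+ * Illustrative usage sketch: fetching the host featureset.
+ * XEN_SYSCTL_cpu_featureset_host comes from the public sysctl interface;
+ * treating it as the right index here is an assumption for the example.
+ *
+ *   uint32_t nr = xc_get_cpu_featureset_size();
+ *   uint32_t *fs = calloc(nr, sizeof(*fs));
+ *
+ *   if ( xc_get_cpu_featureset(xch, XEN_SYSCTL_cpu_featureset_host,
+ *                              &nr, fs) < 0 )
+ *       perror("xc_get_cpu_featureset");
+ */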
+
+enum xc_static_cpu_featuremask {
+    XC_FEATUREMASK_KNOWN,
+    XC_FEATUREMASK_SPECIAL,
+    XC_FEATUREMASK_PV_MAX,
+    XC_FEATUREMASK_PV_DEF,
+    XC_FEATUREMASK_HVM_SHADOW_MAX,
+    XC_FEATUREMASK_HVM_SHADOW_DEF,
+    XC_FEATUREMASK_HVM_HAP_MAX,
+    XC_FEATUREMASK_HVM_HAP_DEF,
+};
+const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
+
+#endif
+
+int xc_livepatch_upload(xc_interface *xch,
+                        char *name, unsigned char *payload, uint32_t size);
+
+int xc_livepatch_get(xc_interface *xch,
+                     char *name,
+                     xen_livepatch_status_t *status);
+
+/*
+ * Get the number of available payloads and the actual total size of
+ * the payloads' name and metadata arrays.
+ *
+ * This function is typically executed before xc_livepatch_list()
+ * to obtain the sizes and correctly allocate all necessary data resources.
+ *
+ * The return value is zero if the hypercall completed successfully.
+ *
+ * If there was an error performing the sysctl operation, the return value
+ * will contain the hypercall error code value.
+ */
+int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
+                                uint32_t *name_total_size,
+                                uint32_t *metadata_total_size);
+
+/*
+ * The heart of this function is to get an array of the following objects:
+ *   - xen_livepatch_status_t: states and return codes of payloads
+ *   - name: names of payloads
+ *   - len: lengths of corresponding payloads' names
+ *   - metadata: payloads' metadata
+ *   - metadata_len: lengths of corresponding payloads' metadata
+ *
+ * However, it is complex because it has to deal with the hypervisor
+ * returning only some of the requested data, or with that data being stale
+ * (another hypercall might alter the list).
+ *
+ * The parameters that the function fills with data from the hypervisor
+ * are 'info', 'name' and 'len'. The 'done' and 'left' parameters are
+ * updated with the number of entries filled out and the number of
+ * entries still to be fetched from the hypervisor, respectively.
+ *
+ * It is expected that the caller of this function will first issue the
+ * xc_livepatch_list_get_sizes() in order to obtain total sizes of names
+ * and all metadata as well as the current number of payload entries.
+ * The total sizes are required and supplied via the 'name_total_size' and
+ * 'metadata_total_size' parameters.
+ *
+ * The caller provides 'max', the maximum number of entries that the
+ * 'info', 'name', 'len', 'metadata' and 'metadata_len' arrays can be
+ * filled with.
+ *
+ * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
+ * structure size.
+ *
+ * Each entry in the 'name' array may have an arbitrary size.
+ *
+ * Each entry in the 'len' array is expected to be of uint32_t size.
+ *
+ * Each entry in the 'metadata' array may have an arbitrary size.
+ *
+ * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
+ *
+ * The return value is zero if the hypercall completed successfully.
+ * Note that the return value is _not_ the amount of entries filled
+ * out - that is saved in 'done'.
+ *
+ * If there was an error performing the operation, the return value
+ * will contain a negative -EXX type value. The 'done' and 'left'
+ * parameters will contain the number of entries that had been
+ * successfully retrieved (if any).
+ */
+int xc_livepatch_list(xc_interface *xch, const unsigned int max,
+                      const unsigned int start,
+                      struct xen_livepatch_status *info,
+                      char *name, uint32_t *len,
+                      const uint32_t name_total_size,
+                      char *metadata, uint32_t *metadata_len,
+                      const uint32_t metadata_total_size,
+                      unsigned int *done, unsigned int *left);
+
+/*
+ * The operations are asynchronous and the hypervisor may take a while
+ * to complete them. The `timeout` offers an option to expire the
+ * operation if it could not be completed within the specified time
+ * (in ns). A value of 0 lets the hypervisor decide the best timeout.
+ * The `flags` argument allows passing extra parameters to the actions.
+ */
+int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
+int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
+int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
+int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
+
+/*
+ * Ensure cache coherency after memory modifications. A call to this function
+ * is only required on ARM as the x86 architecture provides cache coherency
+ * guarantees. Calling this function on x86 is allowed but has no effect.
+ */
+int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
+                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns);
+
+/* Compat shims */
+#include "xenctrl_compat.h"
+
+#endif /* XENCTRL_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
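
The two livepatch listing calls declared above are designed to be used together: xc_livepatch_list_get_sizes() sizes the buffers, xc_livepatch_list() fills them, and the whole sequence may need repeating if the list changed in between. The sketch below is illustrative only and not part of the commit; it assumes an open handle 'xch', that xenctrl.h plus <stdio.h> and <stdlib.h> are included, and it trims allocation error handling:

    /* Illustrative only -- not taken from this commit. */
    static int dump_livepatches(xc_interface *xch)
    {
        unsigned int nr, done, left;
        uint32_t name_total, meta_total;
        struct xen_livepatch_status *info;
        uint32_t *len, *meta_len;
        char *name, *meta;
        int rc;

        if ( xc_livepatch_list_get_sizes(xch, &nr, &name_total, &meta_total) )
            return -1;

        /* Allocation failures are not checked here for brevity. */
        info = calloc(nr, sizeof(*info));
        len = calloc(nr, sizeof(*len));
        meta_len = calloc(nr, sizeof(*meta_len));
        name = calloc(1, name_total);
        meta = calloc(1, meta_total);

        rc = xc_livepatch_list(xch, nr, 0, info, name, len, name_total,
                               meta, meta_len, meta_total, &done, &left);
        if ( !rc )
            printf("%u payload(s) retrieved, %u left\n", done, left);

        free(info);
        free(len);
        free(meta_len);
        free(name);
        free(meta);
        return rc;
    }
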
diff --git a/tools/libs/ctrl/include/xenctrl_compat.h b/tools/libs/ctrl/include/xenctrl_compat.h
new file mode 100644 (file)
index 0000000..464f645
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Compat shims for use by 3rd party consumers of libxenctrl
+ * functionality which has been split into separate libraries.
+ *
+ * New code should use the separate libraries.
+ *
+ * Each interface must be opted-into separately by defining:
+ *
+ * XC_WANT_COMPAT_EVTCHN_API
+ *  - Functions relating to /dev/xen/evtchn
+ */
+#ifndef XENCTRL_COMPAT_H
+#define XENCTRL_COMPAT_H
+
+#ifdef XC_WANT_COMPAT_MAP_FOREIGN_API
+/**
+ * Memory maps a range within one domain to a local address range.  Mappings
+ * should be unmapped with munmap and should follow the same rules as mmap
+ * regarding page alignment.  Returns NULL on failure.
+ *
+ * @parm xch a handle on an open hypervisor interface
+ * @parm dom the domain to map memory from
+ * @parm size the amount of memory to map (in multiples of page size)
+ * @parm prot same flag as in mmap().
+ * @parm mfn the frame address to map.
+ */
+void *xc_map_foreign_range(xc_interface *xch, uint32_t dom,
+                            int size, int prot,
+                            unsigned long mfn );
+
+void *xc_map_foreign_pages(xc_interface *xch, uint32_t dom, int prot,
+                           const xen_pfn_t *arr, int num );
+
+/* Nothing within the library other than the compat wrapper itself
+ * should be using this; everything inside has access to
+ * xenforeignmemory_map().
+ */
+#if !defined(XC_INTERNAL_COMPAT_MAP_FOREIGN_API) || \
+     defined(XC_BUILDING_COMPAT_MAP_FOREIGN_API)
+/**
+ * Like xc_map_foreign_pages(), except it can succeed partially.
+ * When a page cannot be mapped, its respective field in @err is
+ * set to the corresponding errno value.
+ */
+void *xc_map_foreign_bulk(xc_interface *xch, uint32_t dom, int prot,
+                          const xen_pfn_t *arr, int *err, unsigned int num);
+#endif
+
+#endif
+
+#ifdef XC_WANT_COMPAT_EVTCHN_API
+
+typedef struct xenevtchn_handle xc_evtchn;
+typedef xc_evtchn_port_or_error_t evtchn_port_or_error_t;
+
+xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
+                             unsigned open_flags);
+int xc_evtchn_close(xc_evtchn *xce);
+int xc_evtchn_fd(xc_evtchn *xce);
+int xc_evtchn_notify(xc_evtchn *xce, evtchn_port_t port);
+xc_evtchn_port_or_error_t
+xc_evtchn_bind_unbound_port(xc_evtchn *xce, uint32_t domid);
+xc_evtchn_port_or_error_t
+xc_evtchn_bind_interdomain(xc_evtchn *xce, uint32_t domid,
+                           evtchn_port_t remote_port);
+xc_evtchn_port_or_error_t
+xc_evtchn_bind_virq(xc_evtchn *xce, unsigned int virq);
+int xc_evtchn_unbind(xc_evtchn *xce, evtchn_port_t port);
+xc_evtchn_port_or_error_t
+xc_evtchn_pending(xc_evtchn *xce);
+int xc_evtchn_unmask(xc_evtchn *xce, evtchn_port_t port);
+
+#endif /* XC_WANT_COMPAT_EVTCHN_API */
+
+#ifdef XC_WANT_COMPAT_GNTTAB_API
+
+typedef struct xengntdev_handle xc_gnttab;
+
+xc_gnttab *xc_gnttab_open(xentoollog_logger *logger,
+                          unsigned open_flags);
+int xc_gnttab_close(xc_gnttab *xcg);
+void *xc_gnttab_map_grant_ref(xc_gnttab *xcg,
+                              uint32_t domid,
+                              uint32_t ref,
+                              int prot);
+void *xc_gnttab_map_grant_refs(xc_gnttab *xcg,
+                               uint32_t count,
+                               uint32_t *domids,
+                               uint32_t *refs,
+                               int prot);
+void *xc_gnttab_map_domain_grant_refs(xc_gnttab *xcg,
+                                      uint32_t count,
+                                      uint32_t domid,
+                                      uint32_t *refs,
+                                      int prot);
+void *xc_gnttab_map_grant_ref_notify(xc_gnttab *xcg,
+                                     uint32_t domid,
+                                     uint32_t ref,
+                                     int prot,
+                                     uint32_t notify_offset,
+                                     evtchn_port_t notify_port);
+int xc_gnttab_munmap(xc_gnttab *xcg,
+                     void *start_address,
+                     uint32_t count);
+int xc_gnttab_set_max_grants(xc_gnttab *xcg,
+                             uint32_t count);
+
+typedef struct xengntdev_handle xc_gntshr;
+
+xc_gntshr *xc_gntshr_open(xentoollog_logger *logger,
+                          unsigned open_flags);
+int xc_gntshr_close(xc_gntshr *xcg);
+void *xc_gntshr_share_pages(xc_gntshr *xcg, uint32_t domid,
+                            int count, uint32_t *refs, int writable);
+void *xc_gntshr_share_page_notify(xc_gntshr *xcg, uint32_t domid,
+                                  uint32_t *ref, int writable,
+                                  uint32_t notify_offset,
+                                  evtchn_port_t notify_port);
+int xc_gntshr_munmap(xc_gntshr *xcg, void *start_address, uint32_t count);
+
+#endif /* XC_WANT_COMPAT_GNTTAB_API */
+
+#ifdef XC_WANT_COMPAT_DEVICEMODEL_API
+
+int xc_hvm_create_ioreq_server(
+    xc_interface *xch, uint32_t domid, int handle_bufioreq,
+    ioservid_t *id);
+int xc_hvm_get_ioreq_server_info(
+    xc_interface *xch, uint32_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
+    xen_pfn_t *bufioreq_pfn, evtchn_port_t *bufioreq_port);
+int xc_hvm_map_io_range_to_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
+    uint64_t start, uint64_t end);
+int xc_hvm_unmap_io_range_from_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
+    uint64_t start, uint64_t end);
+int xc_hvm_map_pcidev_to_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
+    uint8_t bus, uint8_t device, uint8_t function);
+int xc_hvm_unmap_pcidev_from_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
+    uint8_t bus, uint8_t device, uint8_t function);
+int xc_hvm_destroy_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id);
+int xc_hvm_set_ioreq_server_state(
+    xc_interface *xch, uint32_t domid, ioservid_t id, int enabled);
+int xc_hvm_set_pci_intx_level(
+    xc_interface *xch, uint32_t domid, uint16_t segment, uint8_t bus,
+    uint8_t device, uint8_t intx, unsigned int level);
+int xc_hvm_set_isa_irq_level(
+    xc_interface *xch, uint32_t domid, uint8_t irq, unsigned int level);
+int xc_hvm_set_pci_link_route(
+    xc_interface *xch, uint32_t domid, uint8_t link, uint8_t irq);
+int xc_hvm_inject_msi(
+    xc_interface *xch, uint32_t domid, uint64_t msi_addr, uint32_t msi_data);
+int xc_hvm_track_dirty_vram(
+    xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr,
+    unsigned long *dirty_bitmap);
+int xc_hvm_modified_memory(
+    xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr);
+int xc_hvm_set_mem_type(
+    xc_interface *xch, uint32_t domid, hvmmem_type_t type,
+    uint64_t first_pfn, uint32_t nr);
+int xc_hvm_inject_trap(
+    xc_interface *xch, uint32_t domid, int vcpu, uint8_t vector,
+    uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2);
+int xc_domain_pin_memory_cacheattr(
+    xc_interface *xch, uint32_t domid, uint64_t start, uint64_t end,
+    uint32_t type);
+
+#endif /* XC_WANT_COMPAT_DEVICEMODEL_API */
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
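
As the header comment says, each shim has to be opted into by defining the corresponding macro before including xenctrl.h. A minimal, illustrative consumer of the event-channel shim is sketched below; it is not part of the commit, and new code should use libxenevtchn directly:

    /* Illustrative only -- opt into the evtchn compat API, then use the
     * old-style xc_evtchn_* wrappers declared above. */
    #define XC_WANT_COMPAT_EVTCHN_API 1
    #include <xenctrl.h>

    /* Wait for an event and check that it is 'port'; if so, unmask it so
     * further notifications can be delivered. */
    static int poll_port(xc_evtchn *xce, evtchn_port_t port)
    {
        xc_evtchn_port_or_error_t pending = xc_evtchn_pending(xce);

        if ( pending < 0 || pending != port )
            return -1;

        return xc_evtchn_unmask(xce, port);
    }
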
diff --git a/tools/libs/ctrl/include/xenctrl_dom.h b/tools/libs/ctrl/include/xenctrl_dom.h
new file mode 100644 (file)
index 0000000..40b85b7
--- /dev/null
@@ -0,0 +1,455 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _XC_DOM_H
+#define _XC_DOM_H
+
+#include <xen/libelf/libelf.h>
+
+#define X86_HVM_NR_SPECIAL_PAGES    8
+#define X86_HVM_END_SPECIAL_REGION  0xff000u
+#define XG_MAX_MODULES 2
+
+/* --- typedefs and structs ---------------------------------------- */
+
+typedef uint64_t xen_vaddr_t;
+typedef uint64_t xen_paddr_t;
+
+#define PRIpfn PRI_xen_pfn
+
+struct xc_dom_seg {
+    xen_vaddr_t vstart;
+    xen_vaddr_t vend;
+    xen_pfn_t pfn;
+    xen_pfn_t pages;
+};
+
+struct xc_hvm_firmware_module {
+    uint8_t  *data;
+    uint32_t  length;
+    uint64_t  guest_addr_out;
+};
+
+struct xc_dom_mem {
+    struct xc_dom_mem *next;
+    void *ptr;
+    enum {
+        XC_DOM_MEM_TYPE_MALLOC_INTERNAL,
+        XC_DOM_MEM_TYPE_MALLOC_EXTERNAL,
+        XC_DOM_MEM_TYPE_MMAP,
+    } type;
+    size_t len;
+    unsigned char memory[0];
+};
+
+struct xc_dom_phys {
+    struct xc_dom_phys *next;
+    void *ptr;
+    xen_pfn_t first;
+    xen_pfn_t count;
+};
+
+struct xc_dom_module {
+    void *blob;
+    size_t size;
+    void *cmdline;
+    /* If seg.vstart is non-zero then the module will be loaded at that
+     * address, otherwise it will be placed automatically.
+     *
+     * If automatic placement is used and the module is gzip
+     * compressed then it will be decompressed as it is loaded. If the
+     * module has been explicitly placed then it is loaded as-is,
+     * since decompressing it would risk undoing the manual placement.
+     */
+    struct xc_dom_seg seg;
+};
+
+struct xc_dom_image {
+    /* files */
+    void *kernel_blob;
+    size_t kernel_size;
+    unsigned int num_modules;
+    struct xc_dom_module modules[XG_MAX_MODULES];
+    void *devicetree_blob;
+    size_t devicetree_size;
+
+    size_t max_kernel_size;
+    size_t max_module_size;
+    size_t max_devicetree_size;
+
+    /* arguments and parameters */
+    char *cmdline;
+    size_t cmdline_size;
+    uint32_t f_requested[XENFEAT_NR_SUBMAPS];
+
+    /* info from (elf) kernel image */
+    struct elf_dom_parms parms;
+    char *guest_type;
+
+    /* memory layout */
+    struct xc_dom_seg kernel_seg;
+    struct xc_dom_seg p2m_seg;
+    struct xc_dom_seg pgtables_seg;
+    struct xc_dom_seg devicetree_seg;
+    struct xc_dom_seg start_info_seg;
+    xen_pfn_t start_info_pfn;
+    xen_pfn_t console_pfn;
+    xen_pfn_t xenstore_pfn;
+    xen_pfn_t shared_info_pfn;
+    xen_pfn_t bootstack_pfn;
+    xen_pfn_t pfn_alloc_end;
+    xen_vaddr_t virt_alloc_end;
+    xen_vaddr_t bsd_symtab_start;
+
+    /*
+     * initrd parameters as specified in the start_info page.
+     * Depending on the capabilities of the booted kernel this may be a
+     * virtual address or a pfn. The type is neutral and large enough to hold
+     * a virtual address of a 64-bit kernel even with a 32-bit toolstack.
+     */
+    uint64_t initrd_start;
+    uint64_t initrd_len;
+
+    unsigned int alloc_bootstack;
+    xen_vaddr_t virt_pgtab_end;
+
+    /* other state info */
+    uint32_t f_active[XENFEAT_NR_SUBMAPS];
+
+    /*
+     * pv_p2m is specific to x86 PV guests, and maps GFNs to MFNs.  It is
+     * eventually copied into guest context.
+     */
+    xen_pfn_t *pv_p2m;
+
+    /* physical memory
+     *
+     * An x86 PV guest has one or more blocks of physical RAM,
+     * consisting of total_pages starting at 0. The start address and
+     * size of each block is controlled by vNUMA structures.
+     *
+     * An ARM guest has GUEST_RAM_BANKS regions of RAM, with
+     * rambank_size[i] pages in each. The lowest RAM address
+     * (corresponding to the base of the p2m arrays above) is stored
+     * in rambase_pfn.
+     */
+    xen_pfn_t rambase_pfn;
+    xen_pfn_t total_pages;
+    xen_pfn_t p2m_size;         /* number of pfns covered by p2m */
+    struct xc_dom_phys *phys_pages;
+#if defined (__arm__) || defined(__aarch64__)
+    xen_pfn_t rambank_size[GUEST_RAM_BANKS];
+#endif
+
+    /* malloc memory pool */
+    struct xc_dom_mem *memblocks;
+
+    /* memory footprint stats */
+    size_t alloc_malloc;
+    size_t alloc_mem_map;
+    size_t alloc_file_map;
+    size_t alloc_domU_map;
+
+    /* misc xen domain config stuff */
+    unsigned long flags;
+    unsigned int console_evtchn;
+    unsigned int xenstore_evtchn;
+    uint32_t console_domid;
+    uint32_t xenstore_domid;
+    xen_pfn_t shared_info_mfn;
+
+    xc_interface *xch;
+    uint32_t guest_domid;
+    int claim_enabled; /* 0 by default, 1 enables it */
+
+    int xen_version;
+    xen_capabilities_info_t xen_caps;
+
+    /* kernel loader, arch hooks */
+    struct xc_dom_loader *kernel_loader;
+    void *private_loader;
+
+    /* vNUMA information */
+    xen_vmemrange_t *vmemranges;
+    unsigned int nr_vmemranges;
+    unsigned int *vnode_to_pnode;
+    unsigned int nr_vnodes;
+
+    /* domain type/architecture specific data */
+    void *arch_private;
+
+    /* kernel loader */
+    struct xc_dom_arch *arch_hooks;
+    /* allocate up to pfn_alloc_end */
+    int (*allocate) (struct xc_dom_image * dom);
+
+    /* Container type (HVM or PV). */
+    enum {
+        XC_DOM_PV_CONTAINER,
+        XC_DOM_HVM_CONTAINER,
+    } container_type;
+
+    /* HVM specific fields. */
+    xen_pfn_t target_pages;
+    xen_paddr_t mmio_start;
+    xen_paddr_t mmio_size;
+    xen_paddr_t lowmem_end;
+    xen_paddr_t highmem_end;
+    xen_pfn_t vga_hole_size;
+
+    /* If unset, this disables the setup of the IOREQ pages. */
+    bool device_model;
+
+    /* BIOS/Firmware passed to HVMLOADER */
+    struct xc_hvm_firmware_module system_firmware_module;
+
+    /* Extra ACPI tables */
+#define MAX_ACPI_MODULES        4
+    struct xc_hvm_firmware_module acpi_modules[MAX_ACPI_MODULES];
+
+    /* Extra SMBIOS structures passed to HVMLOADER */
+    struct xc_hvm_firmware_module smbios_module;
+
+#if defined(__i386__) || defined(__x86_64__)
+    struct e820entry *e820;
+    unsigned int e820_entries;
+#endif
+
+    xen_pfn_t vuart_gfn;
+
+    /* Number of vCPUs */
+    unsigned int max_vcpus;
+};
+
+/* --- pluggable kernel loader ------------------------------------- */
+
+struct xc_dom_loader {
+    char *name;
+    /* Sadly the error returns from these functions are not consistent: */
+    elf_negerrnoval (*probe) (struct xc_dom_image * dom);
+    elf_negerrnoval (*parser) (struct xc_dom_image * dom);
+    elf_errorstatus (*loader) (struct xc_dom_image * dom);
+
+    struct xc_dom_loader *next;
+};
+
+#define __init __attribute__ ((constructor))
+void xc_dom_register_loader(struct xc_dom_loader *loader);
+
+/* --- arch specific hooks ----------------------------------------- */
+
+struct xc_dom_arch {
+    int (*alloc_magic_pages) (struct xc_dom_image * dom);
+
+    /* pagetable setup - x86 PV only */
+    int (*alloc_pgtables) (struct xc_dom_image * dom);
+    int (*alloc_p2m_list) (struct xc_dom_image * dom);
+    int (*setup_pgtables) (struct xc_dom_image * dom);
+
+    /* arch-specific data structs setup */
+    /* in Mini-OS environment start_info might be a macro, avoid collision. */
+#undef start_info
+    int (*start_info) (struct xc_dom_image * dom);
+    int (*shared_info) (struct xc_dom_image * dom, void *shared_info);
+    int (*vcpu) (struct xc_dom_image * dom);
+    int (*bootearly) (struct xc_dom_image * dom);
+    int (*bootlate) (struct xc_dom_image * dom);
+
+    /* arch-specific memory initialization. */
+    int (*meminit) (struct xc_dom_image * dom);
+
+    char *guest_type;
+    char *native_protocol;
+    int page_shift;
+    int sizeof_pfn;
+    int p2m_base_supported;
+    int arch_private_size;
+
+    struct xc_dom_arch *next;
+};
+void xc_dom_register_arch_hooks(struct xc_dom_arch *hooks);
+
+#define XC_DOM_PAGE_SHIFT(dom)  ((dom)->arch_hooks->page_shift)
+#define XC_DOM_PAGE_SIZE(dom)   (1LL << (dom)->arch_hooks->page_shift)
+
+/* --- main functions ---------------------------------------------- */
+
+struct xc_dom_image *xc_dom_allocate(xc_interface *xch,
+                                     const char *cmdline, const char *features);
+void xc_dom_release_phys(struct xc_dom_image *dom);
+void xc_dom_release(struct xc_dom_image *dom);
+int xc_dom_rambase_init(struct xc_dom_image *dom, uint64_t rambase);
+int xc_dom_mem_init(struct xc_dom_image *dom, unsigned int mem_mb);
+
+/* Set this larger if you have enormous modules/kernels. Note that
+ * you should trust all kernels not to be maliciously large (e.g. to
+ * exhaust all dom0 memory) if you do this (see CVE-2012-4544 /
+ * XSA-25). You can also set the default independently for
+ * modules/kernels in xc_dom_allocate() or call
+ * xc_dom_{kernel,module}_max_size.
+ */
+#ifndef XC_DOM_DECOMPRESS_MAX
+#define XC_DOM_DECOMPRESS_MAX (1024*1024*1024) /* 1GB */
+#endif
+
+int xc_dom_kernel_check_size(struct xc_dom_image *dom, size_t sz);
+int xc_dom_kernel_max_size(struct xc_dom_image *dom, size_t sz);
+
+int xc_dom_module_max_size(struct xc_dom_image *dom, size_t sz);
+
+int xc_dom_devicetree_max_size(struct xc_dom_image *dom, size_t sz);
+
+size_t xc_dom_check_gzip(xc_interface *xch,
+                         void *blob, size_t ziplen);
+int xc_dom_do_gunzip(xc_interface *xch,
+                     void *src, size_t srclen, void *dst, size_t dstlen);
+int xc_dom_try_gunzip(struct xc_dom_image *dom, void **blob, size_t * size);
+
+int xc_dom_kernel_file(struct xc_dom_image *dom, const char *filename);
+int xc_dom_module_file(struct xc_dom_image *dom, const char *filename,
+                       const char *cmdline);
+int xc_dom_kernel_mem(struct xc_dom_image *dom, const void *mem,
+                      size_t memsize);
+int xc_dom_module_mem(struct xc_dom_image *dom, const void *mem,
+                       size_t memsize, const char *cmdline);
+int xc_dom_devicetree_file(struct xc_dom_image *dom, const char *filename);
+int xc_dom_devicetree_mem(struct xc_dom_image *dom, const void *mem,
+                          size_t memsize);
+
+int xc_dom_parse_image(struct xc_dom_image *dom);
+int xc_dom_set_arch_hooks(struct xc_dom_image *dom);
+int xc_dom_build_image(struct xc_dom_image *dom);
+
+int xc_dom_boot_xen_init(struct xc_dom_image *dom, xc_interface *xch,
+                         uint32_t domid);
+int xc_dom_boot_mem_init(struct xc_dom_image *dom);
+void *xc_dom_boot_domU_map(struct xc_dom_image *dom, xen_pfn_t pfn,
+                           xen_pfn_t count);
+int xc_dom_boot_image(struct xc_dom_image *dom);
+int xc_dom_compat_check(struct xc_dom_image *dom);
+int xc_dom_gnttab_init(struct xc_dom_image *dom);
+int xc_dom_gnttab_seed(xc_interface *xch, uint32_t guest_domid,
+                       bool is_hvm,
+                       xen_pfn_t console_gfn,
+                       xen_pfn_t xenstore_gfn,
+                       uint32_t console_domid,
+                       uint32_t xenstore_domid);
+bool xc_dom_translated(const struct xc_dom_image *dom);
+
+/* --- debugging bits ---------------------------------------------- */
+
+int xc_dom_loginit(xc_interface *xch);
+
+void xc_dom_printf(xc_interface *xch, const char *fmt, ...)
+     __attribute__ ((format(printf, 2, 3)));
+void xc_dom_panic_func(xc_interface *xch,
+                      const char *file, int line, xc_error_code err,
+                      const char *fmt, ...)
+    __attribute__ ((format(printf, 5, 6)));
+
+#define xc_dom_panic(xch, err, fmt, args...) \
+    xc_dom_panic_func(xch, __FILE__, __LINE__, err, fmt, ## args)
+#define xc_dom_trace(mark) \
+    xc_dom_printf("%s:%d: trace %s\n", __FILE__, __LINE__, mark)
+
+void xc_dom_log_memory_footprint(struct xc_dom_image *dom);
+
+/* --- simple memory pool ------------------------------------------ */
+
+void *xc_dom_malloc(struct xc_dom_image *dom, size_t size);
+int xc_dom_register_external(struct xc_dom_image *dom, void *ptr, size_t size);
+void *xc_dom_malloc_page_aligned(struct xc_dom_image *dom, size_t size);
+void *xc_dom_malloc_filemap(struct xc_dom_image *dom,
+                            const char *filename, size_t * size,
+                            const size_t max_size);
+char *xc_dom_strdup(struct xc_dom_image *dom, const char *str);
+
+/* --- alloc memory pool ------------------------------------------- */
+
+xen_pfn_t xc_dom_alloc_page(struct xc_dom_image *dom, char *name);
+int xc_dom_alloc_segment(struct xc_dom_image *dom,
+                         struct xc_dom_seg *seg, char *name,
+                         xen_vaddr_t start, xen_vaddr_t size);
+
+/* --- misc bits --------------------------------------------------- */
+
+void *xc_dom_pfn_to_ptr(struct xc_dom_image *dom, xen_pfn_t first,
+                        xen_pfn_t count);
+void *xc_dom_pfn_to_ptr_retcount(struct xc_dom_image *dom, xen_pfn_t first,
+                                 xen_pfn_t count, xen_pfn_t *count_out);
+void xc_dom_unmap_one(struct xc_dom_image *dom, xen_pfn_t pfn);
+void xc_dom_unmap_all(struct xc_dom_image *dom);
+
+static inline void *xc_dom_seg_to_ptr_pages(struct xc_dom_image *dom,
+                                      struct xc_dom_seg *seg,
+                                      xen_pfn_t *pages_out)
+{
+    void *retval;
+
+    retval = xc_dom_pfn_to_ptr(dom, seg->pfn, seg->pages);
+
+    *pages_out = retval ? seg->pages : 0;
+    return retval;
+}
+
+static inline void *xc_dom_seg_to_ptr(struct xc_dom_image *dom,
+                                      struct xc_dom_seg *seg)
+{
+    xen_pfn_t dummy;
+
+    return xc_dom_seg_to_ptr_pages(dom, seg, &dummy);
+}
+
+static inline void *xc_dom_vaddr_to_ptr(struct xc_dom_image *dom,
+                                        xen_vaddr_t vaddr,
+                                        size_t *safe_region_out)
+{
+    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
+    xen_pfn_t page = (vaddr - dom->parms.virt_base) / page_size;
+    unsigned int offset = (vaddr - dom->parms.virt_base) % page_size;
+    xen_pfn_t safe_region_count;
+    void *ptr;
+
+    *safe_region_out = 0;
+    ptr = xc_dom_pfn_to_ptr_retcount(dom, page, 0, &safe_region_count);
+    if ( ptr == NULL )
+        return ptr;
+    *safe_region_out = (safe_region_count << XC_DOM_PAGE_SHIFT(dom)) - offset;
+    return ptr + offset;
+}
+
+static inline xen_pfn_t xc_dom_p2m(struct xc_dom_image *dom, xen_pfn_t pfn)
+{
+    if ( xc_dom_translated(dom) )
+        return pfn;
+
+    /* x86 PV only now. */
+    if ( pfn >= dom->total_pages )
+        return INVALID_MFN;
+
+    return dom->pv_p2m[pfn];
+}
+
+#endif /* _XC_DOM_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
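
The domain-building entry points declared in this header are normally driven by the toolstack in a fixed order. The sketch below is illustrative only and not part of the commit: it shows a rough PV-style sequence with a placeholder kernel path and command line, while real callers (e.g. libxl) set many more xc_dom_image fields before and between these calls:

    /* Illustrative only -- rough build ordering, error handling trimmed. */
    static struct xc_dom_image *build_guest(xc_interface *xch, uint32_t domid,
                                            unsigned int mem_mb)
    {
        /* "/path/to/kernel" and the cmdline are placeholders. */
        struct xc_dom_image *dom =
            xc_dom_allocate(xch, "console=hvc0", NULL);

        if ( dom == NULL )
            return NULL;

        if ( xc_dom_kernel_file(dom, "/path/to/kernel") ||
             xc_dom_boot_xen_init(dom, xch, domid) ||
             xc_dom_parse_image(dom) ||
             xc_dom_mem_init(dom, mem_mb) ||
             xc_dom_boot_mem_init(dom) ||
             xc_dom_build_image(dom) ||
             xc_dom_boot_image(dom) )
        {
            xc_dom_release(dom);
            return NULL;
        }

        return dom;
    }
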
diff --git a/tools/libs/ctrl/xc_altp2m.c b/tools/libs/ctrl/xc_altp2m.c
new file mode 100644 (file)
index 0000000..6987c95
--- /dev/null
@@ -0,0 +1,436 @@
+/******************************************************************************
+ *
+ * xc_altp2m.c
+ *
+ * Interface to altp2m related HVMOPs
+ *
+ * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+#include <stdbool.h>
+#include <xen/hvm/hvm_op.h>
+
+int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_get_domain_state;
+    arg->domain = dom;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    if ( !rc )
+        *state = arg->u.domain_state.state;
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_set_domain_state;
+    arg->domain = dom;
+    arg->u.domain_state.state = state;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
+                                     uint32_t vcpuid, xen_pfn_t gfn)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_vcpu_enable_notify;
+    arg->domain = domid;
+    arg->u.enable_notify.vcpu_id = vcpuid;
+    arg->u.enable_notify.gfn = gfn;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_set_vcpu_disable_notify(xc_interface *handle, uint32_t domid,
+                                      uint32_t vcpuid)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_vcpu_disable_notify;
+    arg->domain = domid;
+    arg->u.disable_notify.vcpu_id = vcpuid;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
+                          xenmem_access_t default_access, uint16_t *view_id)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_create_p2m;
+    arg->domain = domid;
+    arg->u.view.view = -1;
+    arg->u.view.hvmmem_default_access = default_access;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    if ( !rc )
+        *view_id = arg->u.view.view;
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
+                           uint16_t view_id)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_destroy_p2m;
+    arg->domain = domid;
+    arg->u.view.view = view_id;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+/* Switch all vCPUs of the domain to the specified altp2m view */
+int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
+                             uint16_t view_id)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_switch_p2m;
+    arg->domain = domid;
+    arg->u.view.view = view_id;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
+                              uint16_t view_id, xen_pfn_t gfn, bool *sve)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_get_suppress_ve;
+    arg->domain = domid;
+    arg->u.suppress_ve.view = view_id;
+    arg->u.suppress_ve.gfn = gfn;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    if ( !rc )
+        *sve = arg->u.suppress_ve.suppress_ve;
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
+                              uint16_t view_id, xen_pfn_t gfn, bool sve)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_set_suppress_ve;
+    arg->domain = domid;
+    arg->u.suppress_ve.view = view_id;
+    arg->u.suppress_ve.gfn = gfn;
+    arg->u.suppress_ve.suppress_ve = sve;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+                                   uint16_t view_id, xen_pfn_t first_gfn,
+                                   xen_pfn_t last_gfn, bool sve,
+                                   xen_pfn_t *error_gfn, int32_t *error_code)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_set_suppress_ve_multi;
+    arg->domain = domid;
+    arg->u.suppress_ve_multi.view = view_id;
+    arg->u.suppress_ve_multi.first_gfn = first_gfn;
+    arg->u.suppress_ve_multi.last_gfn = last_gfn;
+    arg->u.suppress_ve_multi.suppress_ve = sve;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    if ( arg->u.suppress_ve_multi.first_error )
+    {
+        *error_gfn = arg->u.suppress_ve_multi.first_error_gfn;
+        *error_code = arg->u.suppress_ve_multi.first_error;
+    }
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
+                             uint16_t view_id, xen_pfn_t gfn,
+                             xenmem_access_t access)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_set_mem_access;
+    arg->domain = domid;
+    arg->u.mem_access.view = view_id;
+    arg->u.mem_access.access = access;
+    arg->u.mem_access.gfn = gfn;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
+                         uint16_t view_id, xen_pfn_t old_gfn,
+                         xen_pfn_t new_gfn)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_change_gfn;
+    arg->domain = domid;
+    arg->u.change_gfn.view = view_id;
+    arg->u.change_gfn.old_gfn = old_gfn;
+    arg->u.change_gfn.new_gfn = new_gfn;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_set_mem_access_multi(xc_interface *xch, uint32_t domid,
+                                   uint16_t view_id, uint8_t *access,
+                                   uint64_t *gfns, uint32_t nr)
+{
+    int rc;
+
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+    DECLARE_HYPERCALL_BOUNCE(access, nr * sizeof(*access),
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(gfns, nr * sizeof(*gfns),
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_set_mem_access_multi;
+    arg->domain = domid;
+    arg->u.set_mem_access_multi.view = view_id;
+    arg->u.set_mem_access_multi.nr = nr;
+
+    if ( xc_hypercall_bounce_pre(xch, gfns) ||
+         xc_hypercall_bounce_pre(xch, access) )
+    {
+        PERROR("Could not bounce memory for HVMOP_altp2m_set_mem_access_multi");
+        return -1;
+    }
+
+    set_xen_guest_handle(arg->u.set_mem_access_multi.pfn_list, gfns);
+    set_xen_guest_handle(arg->u.set_mem_access_multi.access_list, access);
+
+    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(xch, arg);
+    xc_hypercall_bounce_post(xch, access);
+    xc_hypercall_bounce_post(xch, gfns);
+
+    return rc;
+}
+
+int xc_altp2m_get_mem_access(xc_interface *handle, uint32_t domid,
+                             uint16_t view_id, xen_pfn_t gfn,
+                             xenmem_access_t *access)
+{
+    int rc;
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_get_mem_access;
+    arg->domain = domid;
+    arg->u.mem_access.view = view_id;
+    arg->u.mem_access.gfn = gfn;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                 HYPERCALL_BUFFER_AS_ARG(arg));
+
+    if ( !rc )
+        *access = arg->u.mem_access.access;
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
+                               uint32_t vcpuid, uint16_t *altp2m_idx)
+{
+    int rc;
+
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_get_p2m_idx;
+    arg->domain = domid;
+    arg->u.get_vcpu_p2m_idx.vcpu_id = vcpuid;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                 HYPERCALL_BUFFER_AS_ARG(arg));
+    if ( !rc )
+        *altp2m_idx = arg->u.get_vcpu_p2m_idx.altp2m_idx;
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
+                             uint16_t view_id, bool visible)
+{
+    int rc;
+
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_set_visibility;
+    arg->domain = domid;
+    arg->u.set_visibility.altp2m_idx = view_id;
+    arg->u.set_visibility.visible = visible;
+
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
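
Every wrapper in this file follows the same pattern: allocate a hypercall buffer, fill in the HVMOP_altp2m_* sub-op, issue the hypercall via xencall2(), copy any output back and free the buffer. A typical caller sequence, shown here purely as an illustration (not part of the commit; 'xch', 'domid' and 'gfn' are assumed to come from the caller), creates a view, restricts access to one frame in it and switches to it:

    /* Illustrative only -- enable altp2m, create a view, restrict a gfn. */
    static int restrict_gfn_in_new_view(xc_interface *xch, uint32_t domid,
                                        xen_pfn_t gfn)
    {
        uint16_t view;

        if ( xc_altp2m_set_domain_state(xch, domid, true) ||
             xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view) )
            return -1;

        /* Remove execute permission for one frame in the new view only,
         * then make that view the active one for all vCPUs. */
        if ( xc_altp2m_set_mem_access(xch, domid, view, gfn, XENMEM_access_rw) ||
             xc_altp2m_switch_to_view(xch, domid, view) )
            return -1;

        return 0;
    }
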
diff --git a/tools/libs/ctrl/xc_arinc653.c b/tools/libs/ctrl/xc_arinc653.c
new file mode 100644 (file)
index 0000000..5d61c1a
--- /dev/null
@@ -0,0 +1,87 @@
+/******************************************************************************
+ * xc_arinc653.c
+ * 
+ * XC interface to the ARINC653 scheduler
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2010 DornerWorks, Ltd. <DornerWorks.com>
+ */
+
+#include "xc_private.h"
+
+int
+xc_sched_arinc653_schedule_set(
+    xc_interface *xch,
+    uint32_t cpupool_id,
+    struct xen_sysctl_arinc653_schedule *schedule)
+{
+    int rc;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(
+        schedule,
+        sizeof(*schedule),
+        XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, schedule) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_scheduler_op;
+    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
+    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
+    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
+    set_xen_guest_handle(sysctl.u.scheduler_op.u.sched_arinc653.schedule,
+            schedule);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    xc_hypercall_bounce_post(xch, schedule);
+
+    return rc;
+}
+
+int
+xc_sched_arinc653_schedule_get(
+    xc_interface *xch,
+    uint32_t cpupool_id,
+    struct xen_sysctl_arinc653_schedule *schedule)
+{
+    int rc;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(
+        schedule,
+        sizeof(*schedule),
+        XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, schedule) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_scheduler_op;
+    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
+    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
+    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_getinfo;
+    set_xen_guest_handle(sysctl.u.scheduler_op.u.sched_arinc653.schedule,
+            schedule);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    xc_hypercall_bounce_post(xch, schedule);
+
+    return rc;
+}
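
Both wrappers bounce the caller's schedule structure into or out of hypercall-safe memory around a single XEN_SYSCTL_scheduler_op sysctl. An illustrative get/modify/set round trip (not part of the commit; populating the schedule contents is left to the caller) looks like:

    /* Illustrative only -- fetch, adjust and write back a schedule. */
    static int update_arinc653_schedule(xc_interface *xch, uint32_t cpupool_id)
    {
        struct xen_sysctl_arinc653_schedule sched;

        if ( xc_sched_arinc653_schedule_get(xch, cpupool_id, &sched) )
            return -1;

        /* ... adjust the schedule entries here as required ... */

        return xc_sched_arinc653_schedule_set(xch, cpupool_id, &sched);
    }
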
diff --git a/tools/libs/ctrl/xc_bitops.h b/tools/libs/ctrl/xc_bitops.h
new file mode 100644 (file)
index 0000000..0951e82
--- /dev/null
@@ -0,0 +1,79 @@
+#ifndef XC_BITOPS_H
+#define XC_BITOPS_H 1
+
+/* bitmap operations for single threaded access */
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Needed by several includees, but no longer used for bitops. */
+#define BITS_PER_LONG (sizeof(unsigned long) * 8)
+#define ORDER_LONG (sizeof(unsigned long) == 4 ? 5 : 6)
+
+#define BITMAP_ENTRY(_nr,_bmap) ((_bmap))[(_nr) / 8]
+#define BITMAP_SHIFT(_nr) ((_nr) % 8)
+
+/* calculate the number of bytes needed to hold nr_bits */
+static inline int bitmap_size(int nr_bits)
+{
+    return (nr_bits + 7) / 8;
+}
+
+static inline void *bitmap_alloc(int nr_bits)
+{
+    return calloc(1, bitmap_size(nr_bits));
+}
+
+static inline void bitmap_set(void *addr, int nr_bits)
+{
+    memset(addr, 0xff, bitmap_size(nr_bits));
+}
+
+static inline void bitmap_clear(void *addr, int nr_bits)
+{
+    memset(addr, 0, bitmap_size(nr_bits));
+}
+
+static inline int test_bit(int nr, const void *_addr)
+{
+    const char *addr = _addr;
+    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
+}
+
+static inline void clear_bit(int nr, void *_addr)
+{
+    char *addr = _addr;
+    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
+}
+
+static inline void set_bit(int nr, void *_addr)
+{
+    char *addr = _addr;
+    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
+}
+
+static inline int test_and_clear_bit(int nr, void *addr)
+{
+    int oldbit = test_bit(nr, addr);
+    clear_bit(nr, addr);
+    return oldbit;
+}
+
+static inline int test_and_set_bit(int nr, void *addr)
+{
+    int oldbit = test_bit(nr, addr);
+    set_bit(nr, addr);
+    return oldbit;
+}
+
+static inline void bitmap_or(void *_dst, const void *_other,
+                             int nr_bits)
+{
+    char *dst = _dst;
+    const char *other = _other;
+    int i;
+    for ( i = 0; i < bitmap_size(nr_bits); ++i )
+        dst[i] |= other[i];
+}
+
+#endif  /* XC_BITOPS_H */
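
These helpers operate on a plain byte array, one bit per index, and are explicitly single-threaded. An illustrative usage sketch (not part of the commit):

    /* Illustrative only -- allocate a bitmap, set and clear a bit. */
    static int bitmap_demo(void)
    {
        int nr_bits = 1024;
        void *map = bitmap_alloc(nr_bits);

        if ( map == NULL )
            return -1;

        set_bit(42, map);

        /* Returns the old value (1 here) and clears the bit. */
        if ( !test_and_clear_bit(42, map) )
        {
            free(map);
            return -1;
        }

        free(map);
        return 0;
    }
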
diff --git a/tools/libs/ctrl/xc_core.c b/tools/libs/ctrl/xc_core.c
new file mode 100644 (file)
index 0000000..e8c6fb9
--- /dev/null
@@ -0,0 +1,1008 @@
+/*
+ * Elf format, (pfn, gmfn) table, IA64 support.
+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * The xen dump-core file format follows the ELF format specification.
+ * Analysis tools shouldn't depend on the order of sections.
+ * They should follow the ELF header and check section names.
+ *
+ *  +--------------------------------------------------------+
+ *  |ELF header                                              |
+ *  +--------------------------------------------------------+
+ *  |section headers                                         |
+ *  |    null section header                                 |
+ *  |    .shstrtab                                           |
+ *  |    .note.Xen                                           |
+ *  |    .xen_prstatus                                       |
+ *  |    .xen_shared_info if present                         |
+ *  |    .xen_pages                                          |
+ *  |    .xen_p2m or .xen_pfn                                |
+ *  +--------------------------------------------------------+
+ *  |.note.Xen:note section                                  |
+ *  |    "Xen" is used as note name,                         |
+ *  |    types are defined in xen/include/public/elfnote.h   |
+ *  |    and descriptors are defined in xc_core.h.           |
+ *  |    dumpcore none                                       |
+ *  |    dumpcore header                                     |
+ *  |    dumpcore xen version                                |
+ *  |    dumpcore format version                             |
+ *  +--------------------------------------------------------+
+ *  |.xen_prstatus                                           |
+ *  |       vcpu_guest_context_t[nr_vcpus]                   |
+ *  +--------------------------------------------------------+
+ *  |.xen_shared_info if possible                            |
+ *  +--------------------------------------------------------+
+ *  |.xen_pages                                              |
+ *  |    page * nr_pages                                     |
+ *  +--------------------------------------------------------+
+ *  |.xen_p2m or .xen_pfn                                    |
+ *  |    .xen_p2m: struct xen_dumpcore_p2m[nr_pages]         |
+ *  |    .xen_pfn: uint64_t[nr_pages]                        |
+ *  +--------------------------------------------------------+
+ *  |.shstrtab: section header string table                  |
+ *  +--------------------------------------------------------+
+ *
+ */
+
+#include "xc_private.h"
+#include "xc_core.h"
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <xen/libelf/libelf.h>
+
+/* number of pages to write at a time */
+#define DUMP_INCREMENT (4 * 1024)
+
+/* string table */
+struct xc_core_strtab {
+    char       *strings;
+    uint16_t    length;
+    uint16_t    max;
+};
+
+static struct xc_core_strtab*
+xc_core_strtab_init(xc_interface *xch)
+{
+    struct xc_core_strtab *strtab;
+    char *strings;
+    strtab = malloc(sizeof(*strtab));
+    if ( strtab == NULL )
+        return NULL;
+
+    strings = malloc(PAGE_SIZE);
+    if ( strings == NULL )
+    {
+        PERROR("Could not allocate string table init");
+        free(strtab);
+        return NULL;
+    }
+    strtab->strings = strings;
+    strtab->max = PAGE_SIZE;
+
+    /* index 0 represents none */
+    strtab->strings[0] = '\0';
+    strtab->length = 1;
+
+    return strtab;
+}
+
+static void
+xc_core_strtab_free(struct xc_core_strtab *strtab)
+{
+    free(strtab->strings);
+    free(strtab);
+}
+
+static uint16_t
+xc_core_strtab_get(xc_interface *xch, struct xc_core_strtab *strtab, const char *name)
+{
+    uint16_t ret = 0;
+    uint16_t len = strlen(name) + 1;
+
+    if ( strtab->length > UINT16_MAX - len )
+    {
+        PERROR("too long string table");
+        errno = E2BIG;
+        return ret;
+    }
+    
+    if ( strtab->length + len > strtab->max )
+    {
+        char *tmp;
+        if ( strtab->max > UINT16_MAX / 2 )
+        {
+            PERROR("too long string table");
+            errno = ENOMEM;
+            return ret;
+        }
+
+        tmp = realloc(strtab->strings, strtab->max * 2);
+        if ( tmp == NULL )
+        {
+            PERROR("Could not allocate string table");
+            return ret;
+        }
+
+        strtab->strings = tmp;
+        strtab->max *= 2;
+    }
+
+    ret = strtab->length;
+    strcpy(strtab->strings + strtab->length, name);
+    strtab->length += len;
+    return ret;
+}
+
+
+/* section headers */
+struct xc_core_section_headers {
+    uint16_t    num;
+    uint16_t    num_max;
+
+    Elf64_Shdr  *shdrs;
+};
+#define SHDR_INIT       ((uint16_t)16)
+#define SHDR_INC        ((uint16_t)4)
+
+static struct xc_core_section_headers*
+xc_core_shdr_init(xc_interface *xch)
+{
+    struct xc_core_section_headers *sheaders;
+    sheaders = malloc(sizeof(*sheaders));
+    if ( sheaders == NULL )
+        return NULL;
+
+    sheaders->num = 0;
+    sheaders->num_max = SHDR_INIT;
+    sheaders->shdrs = malloc(sizeof(sheaders->shdrs[0]) * sheaders->num_max);
+    if ( sheaders->shdrs == NULL )
+    {
+        free(sheaders);
+        return NULL;
+    }
+    return sheaders;
+}
+
+static void
+xc_core_shdr_free(struct xc_core_section_headers *sheaders)
+{
+    free(sheaders->shdrs);
+    free(sheaders);
+}
+
+Elf64_Shdr*
+xc_core_shdr_get(xc_interface *xch,
+                 struct xc_core_section_headers *sheaders)
+{
+    Elf64_Shdr *shdr;
+
+    if ( sheaders->num == sheaders->num_max )
+    {
+        Elf64_Shdr *shdrs;
+        if ( sheaders->num_max > UINT16_MAX - SHDR_INC )
+        {
+            errno = E2BIG;
+            return NULL;
+        }
+        sheaders->num_max += SHDR_INC;
+        shdrs = realloc(sheaders->shdrs,
+                        sizeof(sheaders->shdrs[0]) * sheaders->num_max);
+        if ( shdrs == NULL )
+            return NULL;
+        sheaders->shdrs = shdrs;
+    }
+
+    shdr = &sheaders->shdrs[sheaders->num];
+    sheaders->num++;
+    memset(shdr, 0, sizeof(*shdr));
+    return shdr;
+}
+
+int
+xc_core_shdr_set(xc_interface *xch,
+                 Elf64_Shdr *shdr,
+                 struct xc_core_strtab *strtab,
+                 const char *name, uint32_t type,
+                 uint64_t offset, uint64_t size,
+                 uint64_t addralign, uint64_t entsize)
+{
+    uint64_t name_idx = xc_core_strtab_get(xch, strtab, name);
+    if ( name_idx == 0 )
+        return -1;
+
+    shdr->sh_name = name_idx;
+    shdr->sh_type = type;
+    shdr->sh_offset = offset;
+    shdr->sh_size = size;
+    shdr->sh_addralign = addralign;
+    shdr->sh_entsize = entsize;
+    return 0;
+}
+
+static void
+xc_core_ehdr_init(Elf64_Ehdr *ehdr)
+{
+    memset(ehdr, 0, sizeof(*ehdr));
+    ehdr->e_ident[EI_MAG0] = ELFMAG0;
+    ehdr->e_ident[EI_MAG1] = ELFMAG1;
+    ehdr->e_ident[EI_MAG2] = ELFMAG2;
+    ehdr->e_ident[EI_MAG3] = ELFMAG3;
+    ehdr->e_ident[EI_CLASS] = ELFCLASS64;
+    ehdr->e_ident[EI_DATA] = ELF_ARCH_DATA;
+    ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+    ehdr->e_ident[EI_OSABI] = ELFOSABI_SYSV;
+    ehdr->e_ident[EI_ABIVERSION] = EV_CURRENT;
+
+    ehdr->e_type = ET_CORE;
+    /* e_machine will be filled in later */
+    ehdr->e_version = EV_CURRENT;
+    ehdr->e_entry = 0;
+    ehdr->e_phoff = 0;
+    ehdr->e_shoff = sizeof(*ehdr);
+    ehdr->e_flags = ELF_CORE_EFLAGS;
+    ehdr->e_ehsize = sizeof(*ehdr);
+    ehdr->e_phentsize = sizeof(Elf64_Phdr);
+    ehdr->e_phnum = 0;
+    ehdr->e_shentsize = sizeof(Elf64_Shdr);
+    /* ehdr->e_shnum and ehdr->e_shstrndx aren't known here yet;
+     * fill them in later. */
+}
+
+static int
+elfnote_fill_xen_version(xc_interface *xch,
+                         struct xen_dumpcore_elfnote_xen_version_desc
+                         *xen_version)
+{
+    int rc;
+    memset(xen_version, 0, sizeof(*xen_version));
+
+    rc = xc_version(xch, XENVER_version, NULL);
+    if ( rc < 0 )
+        return rc;
+    xen_version->major_version = rc >> 16;
+    xen_version->minor_version = rc & ((1 << 16) - 1);
+
+    rc = xc_version(xch, XENVER_extraversion,
+                    &xen_version->extra_version);
+    if ( rc < 0 )
+        return rc;
+
+    rc = xc_version(xch, XENVER_compile_info,
+                    &xen_version->compile_info);
+    if ( rc < 0 )
+        return rc;
+
+    rc = xc_version(xch,
+                    XENVER_capabilities, &xen_version->capabilities);
+    if ( rc < 0 )
+        return rc;
+
+    rc = xc_version(xch, XENVER_changeset, &xen_version->changeset);
+    if ( rc < 0 )
+        return rc;
+
+    rc = xc_version(xch, XENVER_platform_parameters,
+                    &xen_version->platform_parameters);
+    if ( rc < 0 )
+        return rc;
+
+    rc = xc_version(xch, XENVER_pagesize, NULL);
+    if ( rc < 0 )
+        return rc;
+    xen_version->pagesize = rc;
+
+    return 0;
+}
+
+static void
+elfnote_fill_format_version(struct xen_dumpcore_elfnote_format_version_desc
+                            *format_version)
+{
+    format_version->version = XEN_DUMPCORE_FORMAT_VERSION_CURRENT;
+}
+
+static void
+elfnote_init(struct elfnote *elfnote)
+{
+    /* elf note section */
+    memset(elfnote, 0, sizeof(*elfnote));
+    elfnote->namesz = strlen(XEN_DUMPCORE_ELFNOTE_NAME) + 1;
+    strncpy(elfnote->name, XEN_DUMPCORE_ELFNOTE_NAME, sizeof(elfnote->name));
+}
+
+static int
+elfnote_dump_none(xc_interface *xch, void *args, dumpcore_rtn_t dump_rtn)
+{
+    int sts;
+    struct elfnote elfnote;
+    struct xen_dumpcore_elfnote_none_desc none;
+
+    elfnote_init(&elfnote);
+    /* Avoid compile warning about constant-zero-sized memset(). */
+    /*memset(&none, 0, sizeof(none));*/
+
+    elfnote.descsz = sizeof(none);
+    elfnote.type = XEN_ELFNOTE_DUMPCORE_NONE;
+    sts = dump_rtn(xch, args, (char*)&elfnote, sizeof(elfnote));
+    if ( sts != 0 )
+        return sts;
+    return dump_rtn(xch, args, (char*)&none, sizeof(none));
+}
+
+static int
+elfnote_dump_core_header(
+    xc_interface *xch,
+    void *args, dumpcore_rtn_t dump_rtn, const xc_dominfo_t *info,
+    int nr_vcpus, unsigned long nr_pages)
+{
+    int sts;
+    struct elfnote elfnote;
+    struct xen_dumpcore_elfnote_header_desc header;
+    
+    elfnote_init(&elfnote);
+    memset(&header, 0, sizeof(header));
+    
+    elfnote.descsz = sizeof(header);
+    elfnote.type = XEN_ELFNOTE_DUMPCORE_HEADER;
+    header.xch_magic = info->hvm ? XC_CORE_MAGIC_HVM : XC_CORE_MAGIC;
+    header.xch_nr_vcpus = nr_vcpus;
+    header.xch_nr_pages = nr_pages;
+    header.xch_page_size = PAGE_SIZE;
+    sts = dump_rtn(xch, args, (char*)&elfnote, sizeof(elfnote));
+    if ( sts != 0 )
+        return sts;
+    return dump_rtn(xch, args, (char*)&header, sizeof(header));
+}
+
+static int
+elfnote_dump_xen_version(xc_interface *xch, void *args,
+                         dumpcore_rtn_t dump_rtn, unsigned int guest_width)
+{
+    int sts;
+    struct elfnote elfnote;
+    struct xen_dumpcore_elfnote_xen_version_desc xen_version;
+
+    elfnote_init(&elfnote);
+    memset(&xen_version, 0, sizeof(xen_version));
+
+    elfnote.descsz = sizeof(xen_version);
+    elfnote.type = XEN_ELFNOTE_DUMPCORE_XEN_VERSION;
+    elfnote_fill_xen_version(xch, &xen_version);
+    if (guest_width < sizeof(unsigned long))
+    {
+        // The 32-bit ELF file format aligns the pagesize field differently.
+        char *p = (char *)&xen_version.pagesize;
+        memmove(p - 4, p, sizeof(xen_version.pagesize));
+    }
+    sts = dump_rtn(xch, args, (char*)&elfnote, sizeof(elfnote));
+    if ( sts != 0 )
+        return sts;
+    return dump_rtn(xch, args, (char*)&xen_version, sizeof(xen_version));
+}
+
+static int
+elfnote_dump_format_version(xc_interface *xch,
+                            void *args, dumpcore_rtn_t dump_rtn)
+{
+    int sts;
+    struct elfnote elfnote;
+    struct xen_dumpcore_elfnote_format_version_desc format_version;
+
+    elfnote_init(&elfnote);
+    memset(&format_version, 0, sizeof(format_version));
+    
+    elfnote.descsz = sizeof(format_version);
+    elfnote.type = XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION;
+    elfnote_fill_format_version(&format_version);
+    sts = dump_rtn(xch, args, (char*)&elfnote, sizeof(elfnote));
+    if ( sts != 0 )
+        return sts;
+    return dump_rtn(xch, args, (char*)&format_version, sizeof(format_version));
+}
+
+int
+xc_domain_dumpcore_via_callback(xc_interface *xch,
+                                uint32_t domid,
+                                void *args,
+                                dumpcore_rtn_t dump_rtn)
+{
+    xc_dominfo_t info;
+    shared_info_any_t *live_shinfo = NULL;
+    struct domain_info_context _dinfo = {};
+    struct domain_info_context *dinfo = &_dinfo;
+
+    int nr_vcpus = 0;
+    char *dump_mem, *dump_mem_start = NULL;
+    vcpu_guest_context_any_t *ctxt = NULL;
+    struct xc_core_arch_context arch_ctxt;
+    char dummy[PAGE_SIZE];
+    int dummy_len;
+    int sts = -1;
+
+    unsigned long i;
+    unsigned long j;
+    unsigned long nr_pages;
+
+    xc_core_memory_map_t *memory_map = NULL;
+    unsigned int nr_memory_map;
+    unsigned int map_idx;
+
+    int auto_translated_physmap;
+    xen_pfn_t *p2m = NULL;
+    struct xen_dumpcore_p2m *p2m_array = NULL;
+
+    uint64_t *pfn_array = NULL;
+
+    Elf64_Ehdr ehdr;
+    uint64_t filesz;
+    uint64_t offset;
+    uint64_t fixup;
+
+    struct xc_core_strtab *strtab = NULL;
+    uint16_t strtab_idx;
+    struct xc_core_section_headers *sheaders = NULL;
+    Elf64_Shdr *shdr;
+    xc_core_arch_context_init(&arch_ctxt);
+    if ( (dump_mem_start = malloc(DUMP_INCREMENT*PAGE_SIZE)) == NULL )
+    {
+        PERROR("Could not allocate dump_mem");
+        goto out;
+    }
+
+    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
+    {
+        PERROR("Could not get info for domain");
+        goto out;
+    }
+    /* Map the shared info frame */
+    live_shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
+                                       PROT_READ, info.shared_info_frame);
+    if ( !live_shinfo && !info.hvm )
+    {
+        PERROR("Couldn't map live_shinfo");
+        goto out;
+    }
+    auto_translated_physmap = xc_core_arch_auto_translated_physmap(&info);
+
+    if ( !auto_translated_physmap )
+    {
+        if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
+        {
+            PERROR("Could not get address size for domain");
+            goto out;
+        }
+    }
+    else
+    {
+        /*
+         * Autotranslated guest never sets guest width in the first
+         * place. Force guest_width to be sizeof(unsigned long) so
+         * code below functions properly.
+         *
+         * Here is why this is correct.
+         *
+         * 1. Before f969bc9fc, xc_domain_get_guest_width for HVM (x86
+         * and ARM) always returned hypervisor's idea of
+         * sizeof(unsigned long).
+         *
+         * 2. There has never been a situation in which hypervisor's
+         * word width is smaller than toolstack domain's (i.e. no
+         * 32bit hypervisor + 64bit toolstack).
+         *
+         * Predicates in code test guest_width against toolstack
+         * domain's sizeof(unsigned long), so setting guest_width to
+         * toolstack domain's idea of sizeof(unsigned long) matches
+         * the original behaviour for HVM guests.
+         */
+        dinfo->guest_width = sizeof(unsigned long);
+    }
+
+    if ( domid != info.domid )
+    {
+        PERROR("Domain %d does not exist", domid);
+        goto out;
+    }
+
+    ctxt = calloc(sizeof(*ctxt), info.max_vcpu_id + 1);
+    if ( !ctxt )
+    {
+        PERROR("Could not allocate vcpu context array");
+        goto out;
+    }
+
+    for ( i = 0; i <= info.max_vcpu_id; i++ )
+    {
+        if ( xc_vcpu_getcontext(xch, domid, i, &ctxt[nr_vcpus]) == 0 )
+        {
+            if ( xc_core_arch_context_get(&arch_ctxt, &ctxt[nr_vcpus],
+                                          xch, domid) )
+                continue;
+            nr_vcpus++;
+        }
+    }
+    if ( nr_vcpus == 0 )
+    {
+        PERROR("No VCPU context could be grabbed");
+        goto out;
+    }
+
+    /* obtain memory map */
+    sts = xc_core_arch_memory_map_get(xch, &arch_ctxt, &info,
+                                      live_shinfo, &memory_map,
+                                      &nr_memory_map);
+    if ( sts != 0 )
+        goto out;
+
+    /*
+     * Note: this is the *current* number of pages and may change under
+     * a live dump-core.  We'll just take this value, and if more pages
+     * exist, we'll skip them.  If there are fewer, we simply won't use
+     * the whole array.
+     *
+     * We don't want to use the total potential size of the memory map
+     * since that is usually much higher than info.nr_pages.
+     */
+    nr_pages = info.nr_pages;
+
+    if ( !auto_translated_physmap )
+    {
+        /* obtain p2m table */
+        p2m_array = malloc(nr_pages * sizeof(p2m_array[0]));
+        if ( p2m_array == NULL )
+        {
+            PERROR("Could not allocate p2m array");
+            goto out;
+        }
+
+        sts = xc_core_arch_map_p2m(xch, dinfo->guest_width, &info, live_shinfo,
+                                   &p2m, &dinfo->p2m_size);
+        if ( sts != 0 )
+            goto out;
+    }
+    else
+    {
+        pfn_array = malloc(nr_pages * sizeof(pfn_array[0]));
+        if ( pfn_array == NULL )
+        {
+            PERROR("Could not allocate pfn array");
+            goto out;
+        }
+    }
+
+    /* ehdr.e_shnum and ehdr.e_shstrndx aren't known here yet; fill them in later. */
+    xc_core_ehdr_init(&ehdr);
+
+    /* create section header */
+    strtab = xc_core_strtab_init(xch);
+    if ( strtab == NULL )
+    {
+        PERROR("Could not allocate string table");
+        goto out;
+    }
+    sheaders = xc_core_shdr_init(xch);
+    if ( sheaders == NULL )
+    {
+        PERROR("Could not allocate section headers");
+        goto out;
+    }
+    /* null section */
+    shdr = xc_core_shdr_get(xch,sheaders);
+    if ( shdr == NULL )
+    {
+        PERROR("Could not get section header for null section");
+        goto out;
+    }
+
+    /* .shstrtab */
+    shdr = xc_core_shdr_get(xch,sheaders);
+    if ( shdr == NULL )
+    {
+        PERROR("Could not get section header for shstrtab");
+        goto out;
+    }
+    strtab_idx = shdr - sheaders->shdrs;
+    /* strtab_shdr.sh_offset and strtab_shdr.sh_size aren't known yet.
+     * fill them in later
+     */
+    sts = xc_core_shdr_set(xch, shdr, strtab, ELF_SHSTRTAB, SHT_STRTAB, 0, 0, 0, 0);
+    if ( sts != 0 )
+        goto out;
+
+    /* elf note section */
+    /* The number of section headers is not yet known; fix up the offsets later. */
+    offset = sizeof(ehdr);
+    filesz =
+        sizeof(struct xen_dumpcore_elfnote_none) +         /* none */
+        sizeof(struct xen_dumpcore_elfnote_header) +       /* core header */
+        sizeof(struct xen_dumpcore_elfnote_xen_version) +  /* xen version */
+        sizeof(struct xen_dumpcore_elfnote_format_version);/* format version */
+    shdr = xc_core_shdr_get(xch,sheaders);
+    if ( shdr == NULL )
+    {
+        PERROR("Could not get section header for note section");
+        goto out;
+    }
+    sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_NOTE, SHT_NOTE,
+                           offset, filesz, 0, 0);
+    if ( sts != 0 )
+        goto out;
+    offset += filesz;
+
+    /* prstatus */
+    shdr = xc_core_shdr_get(xch,sheaders);
+    if ( shdr == NULL )
+    {
+        PERROR("Could not get section header for .xen_prstatus");
+        goto out;
+    }
+    filesz = sizeof(*ctxt) * nr_vcpus;
+    sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_PRSTATUS,
+                           SHT_PROGBITS, offset, filesz,
+                           __alignof__(*ctxt), sizeof(*ctxt));
+    if ( sts != 0 )
+        goto out;
+    offset += filesz;
+
+    /* arch context */
+    sts = xc_core_arch_context_get_shdr(xch, &arch_ctxt, sheaders, strtab,
+                                        &filesz, offset);
+    if ( sts != 0 )
+        goto out;
+    offset += filesz;
+
+    /* shared_info */
+    if ( live_shinfo != NULL )
+    {
+        shdr = xc_core_shdr_get(xch,sheaders);
+        if ( shdr == NULL )
+        {
+            PERROR("Could not get section header for .xen_shared_info");
+            goto out;
+        }
+        filesz = PAGE_SIZE;
+        sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_SHARED_INFO,
+                               SHT_PROGBITS, offset, filesz,
+                               __alignof__(*live_shinfo), PAGE_SIZE);
+        if ( sts != 0 )
+            goto out;
+        offset += filesz;
+    }
+
+    /*
+     * The pages and p2m/pfn sections are the last to have section headers
+     * allocated, so the total number of section headers is known here.
+     * 2 = pages section + p2m/pfn table section
+     */
+    fixup = (sheaders->num + 2) * sizeof(*shdr);
+    /* zeroth section should have zero offset */
+    for ( i = 1; i < sheaders->num; i++ )
+        sheaders->shdrs[i].sh_offset += fixup;
+    offset += fixup;
+    dummy_len = ROUNDUP(offset, PAGE_SHIFT) - offset; /* padding length */
+    offset += dummy_len;
+
+    /* pages */
+    shdr = xc_core_shdr_get(xch,sheaders);
+    if ( shdr == NULL )
+    {
+        PERROR("could not get section headers for .xen_pages");
+        goto out;
+    }
+    filesz = (uint64_t)nr_pages * PAGE_SIZE;
+    sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_PAGES, SHT_PROGBITS,
+                           offset, filesz, PAGE_SIZE, PAGE_SIZE);
+    if ( sts != 0 )
+        goto out;
+    offset += filesz;
+
+    /* p2m/pfn table */
+    shdr = xc_core_shdr_get(xch,sheaders);
+    if ( shdr == NULL )
+    {
+        PERROR("Could not get section header for .xen_{p2m, pfn} table");
+        goto out;
+    }
+    if ( !auto_translated_physmap )
+    {
+        filesz = (uint64_t)nr_pages * sizeof(p2m_array[0]);
+        sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_P2M,
+                               SHT_PROGBITS,
+                               offset, filesz, __alignof__(p2m_array[0]),
+                               sizeof(p2m_array[0]));
+    }
+    else
+    {
+        filesz = (uint64_t)nr_pages * sizeof(pfn_array[0]);
+        sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_PFN,
+                               SHT_PROGBITS,
+                               offset, filesz, __alignof__(pfn_array[0]),
+                               sizeof(pfn_array[0]));
+    }
+    if ( sts != 0 )
+        goto out;
+    offset += filesz;
+
+    /* fixing up section header string table section header */
+    filesz = strtab->length;
+    sheaders->shdrs[strtab_idx].sh_offset = offset;
+    sheaders->shdrs[strtab_idx].sh_size = filesz;
+
+    /* write out elf header */
+    ehdr.e_shnum = sheaders->num;
+    ehdr.e_shstrndx = strtab_idx;
+    ehdr.e_machine = ELF_ARCH_MACHINE;
+    sts = dump_rtn(xch, args, (char*)&ehdr, sizeof(ehdr));
+    if ( sts != 0 )
+        goto out;
+
+    /* section headers */
+    sts = dump_rtn(xch, args, (char*)sheaders->shdrs,
+                   sheaders->num * sizeof(sheaders->shdrs[0]));
+    if ( sts != 0 )
+        goto out;
+
+    /* elf note section: xen core header */
+    sts = elfnote_dump_none(xch, args, dump_rtn);
+    if ( sts != 0 )
+        goto out;
+
+    /* elf note section: xen core header */
+    sts = elfnote_dump_core_header(xch, args, dump_rtn, &info, nr_vcpus, nr_pages);
+    if ( sts != 0 )
+        goto out;
+
+    /* elf note section: xen version */
+    sts = elfnote_dump_xen_version(xch, args, dump_rtn, dinfo->guest_width);
+    if ( sts != 0 )
+        goto out;
+
+    /* elf note section: format version */
+    sts = elfnote_dump_format_version(xch, args, dump_rtn);
+    if ( sts != 0 )
+        goto out;
+
+    /* prstatus: .xen_prstatus */
+    sts = dump_rtn(xch, args, (char *)ctxt, sizeof(*ctxt) * nr_vcpus);
+    if ( sts != 0 )
+        goto out;
+
+    if ( live_shinfo != NULL )
+    {
+        /* shared_info: .xen_shared_info */
+        sts = dump_rtn(xch, args, (char*)live_shinfo, PAGE_SIZE);
+        if ( sts != 0 )
+            goto out;
+    }
+
+    /* arch specific context */
+    sts = xc_core_arch_context_dump(xch, &arch_ctxt, args, dump_rtn);
+    if ( sts != 0 )
+        goto out;
+
+    /* Pad the output data to page alignment. */
+    memset(dummy, 0, PAGE_SIZE);
+    sts = dump_rtn(xch, args, dummy, dummy_len);
+    if ( sts != 0 )
+        goto out;
+
+    /* dump pages: .xen_pages */
+    j = 0;
+    dump_mem = dump_mem_start;
+    for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
+    {
+        uint64_t pfn_start;
+        uint64_t pfn_end;
+
+        pfn_start = memory_map[map_idx].addr >> PAGE_SHIFT;
+        pfn_end = pfn_start + (memory_map[map_idx].size >> PAGE_SHIFT);
+        for ( i = pfn_start; i < pfn_end; i++ )
+        {
+            uint64_t gmfn;
+            void *vaddr;
+            
+            if ( j >= nr_pages )
+            {
+                /*
+                 * When live dump-mode (-L option) is specified,
+                 * the guest domain may have increased its memory.
+                 */
+                IPRINTF("exceeded nr_pages (%ld) losing pages", nr_pages);
+                goto copy_done;
+            }
+
+            if ( !auto_translated_physmap )
+            {
+                if ( dinfo->guest_width >= sizeof(unsigned long) )
+                {
+                    if ( dinfo->guest_width == sizeof(unsigned long) )
+                        gmfn = p2m[i];
+                    else
+                        gmfn = ((uint64_t *)p2m)[i];
+                    if ( gmfn == INVALID_PFN )
+                        continue;
+                }
+                else
+                {
+                    gmfn = ((uint32_t *)p2m)[i];
+                    if ( gmfn == (uint32_t)INVALID_PFN )
+                       continue;
+                }
+
+                p2m_array[j].pfn = i;
+                p2m_array[j].gmfn = gmfn;
+            }
+            else
+            {
+                if ( !xc_core_arch_gpfn_may_present(&arch_ctxt, i) )
+                    continue;
+
+                gmfn = i;
+                pfn_array[j] = i;
+            }
+
+            vaddr = xc_map_foreign_range(
+                xch, domid, PAGE_SIZE, PROT_READ, gmfn);
+            if ( vaddr == NULL )
+                continue;
+            memcpy(dump_mem, vaddr, PAGE_SIZE);
+            munmap(vaddr, PAGE_SIZE);
+            dump_mem += PAGE_SIZE;
+            if ( (j + 1) % DUMP_INCREMENT == 0 )
+            {
+                sts = dump_rtn(
+                    xch, args, dump_mem_start, dump_mem - dump_mem_start);
+                if ( sts != 0 )
+                    goto out;
+                dump_mem = dump_mem_start;
+            }
+
+            j++;
+        }
+    }
+
+copy_done:
+    sts = dump_rtn(xch, args, dump_mem_start, dump_mem - dump_mem_start);
+    if ( sts != 0 )
+        goto out;
+    if ( j < nr_pages )
+    {
+        /* When live dump-mode (-L option) is specified,
+         * guest domain may reduce memory. pad with zero pages.
+         */
+        DPRINTF("j (%ld) != nr_pages (%ld)", j, nr_pages);
+        memset(dump_mem_start, 0, PAGE_SIZE);
+        for (; j < nr_pages; j++) {
+            sts = dump_rtn(xch, args, dump_mem_start, PAGE_SIZE);
+            if ( sts != 0 )
+                goto out;
+            if ( !auto_translated_physmap )
+            {
+                p2m_array[j].pfn = XC_CORE_INVALID_PFN;
+                p2m_array[j].gmfn = XC_CORE_INVALID_GMFN;
+            }
+            else
+                pfn_array[j] = XC_CORE_INVALID_PFN;
+        }
+    }
+
+    /* p2m/pfn table: .xen_p2m/.xen_pfn */
+    if ( !auto_translated_physmap )
+        sts = dump_rtn(
+            xch, args, (char *)p2m_array, sizeof(p2m_array[0]) * nr_pages);
+    else
+        sts = dump_rtn(
+            xch, args, (char *)pfn_array, sizeof(pfn_array[0]) * nr_pages);
+    if ( sts != 0 )
+        goto out;
+
+    /* elf section header string table: .shstrtab */
+    sts = dump_rtn(xch, args, strtab->strings, strtab->length);
+    if ( sts != 0 )
+        goto out;
+
+    sts = 0;
+
+out:
+    if ( memory_map != NULL )
+        free(memory_map);
+    if ( p2m != NULL )
+        munmap(p2m, PAGE_SIZE * P2M_FL_ENTRIES);
+    if ( p2m_array != NULL )
+        free(p2m_array);
+    if ( pfn_array != NULL )
+        free(pfn_array);
+    if ( sheaders != NULL )
+        xc_core_shdr_free(sheaders);
+    if ( strtab != NULL )
+        xc_core_strtab_free(strtab);
+    if ( ctxt != NULL )
+        free(ctxt);
+    if ( dump_mem_start != NULL )
+        free(dump_mem_start);
+    if ( live_shinfo != NULL )
+        munmap(live_shinfo, PAGE_SIZE);
+    xc_core_arch_context_free(&arch_ctxt);
+
+    return sts;
+}
+
+/* Callback args for writing to a local dump file. */
+struct dump_args {
+    int     fd;
+};
+
+/* Callback routine for writing to a local dump file. */
+static int local_file_dump(xc_interface *xch,
+                           void *args, char *buffer, unsigned int length)
+{
+    struct dump_args *da = args;
+
+    if ( write_exact(da->fd, buffer, length) == -1 )
+    {
+        PERROR("Failed to write buffer");
+        return -errno;
+    }
+
+    if ( length >= (DUMP_INCREMENT * PAGE_SIZE) )
+    {
+        // Now dumping pages -- make sure we discard clean pages from
+        // the cache after each write
+        discard_file_cache(xch, da->fd, 0 /* no flush */);
+    }
+
+    return 0;
+}
+
+int
+xc_domain_dumpcore(xc_interface *xch,
+                   uint32_t domid,
+                   const char *corename)
+{
+    struct dump_args da;
+    int sts;
+
+    if ( (da.fd = open(corename, O_CREAT|O_RDWR|O_TRUNC, S_IWUSR|S_IRUSR)) < 0 )
+    {
+        PERROR("Could not open corefile %s", corename);
+        return -errno;
+    }
+
+    sts = xc_domain_dumpcore_via_callback(
+        xch, domid, &da, &local_file_dump);
+
+    /* flush and discard any remaining portion of the file from cache */
+    discard_file_cache(xch, da.fd, 1/* flush first*/);
+
+    close(da.fd);
+
+    return sts;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
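For orientation, below is a minimal sketch of how a consumer might drive the dump-core API defined above, both through xc_domain_dumpcore() and through xc_domain_dumpcore_via_callback() with a custom dumpcore_rtn_t. It is an illustration only, not part of this patch; the xc_interface handling and the byte-counting callback are assumptions of the sketch.

    /* Illustrative sketch -- not part of this patch. */
    #include <stdint.h>
    #include <xenctrl.h>

    /* A trivial dumpcore_rtn_t that only measures the size of the core. */
    static int count_bytes(xc_interface *xch, void *args, char *buffer,
                           unsigned int length)
    {
        *(uint64_t *)args += length;
        return 0;
    }

    static int dump_domain(uint32_t domid, const char *path)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint64_t core_size = 0;
        int rc;

        if ( !xch )
            return -1;

        /* Write an ELF64 core file via the local_file_dump callback. */
        rc = xc_domain_dumpcore(xch, domid, path);

        /* Or drive the same machinery with a custom callback. */
        if ( rc == 0 )
            rc = xc_domain_dumpcore_via_callback(xch, domid, &core_size,
                                                 count_bytes);

        xc_interface_close(xch);
        return rc;
    }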
diff --git a/tools/libs/ctrl/xc_core.h b/tools/libs/ctrl/xc_core.h
new file mode 100644 (file)
index 0000000..36fb755
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef XC_CORE_H
+#define XC_CORE_H
+
+#include "xen/version.h"
+#include "xc_private.h"
+#include "xen/libelf/elfstructs.h"
+
+/* section names */
+#define XEN_DUMPCORE_SEC_NOTE                   ".note.Xen"
+#define XEN_DUMPCORE_SEC_PRSTATUS               ".xen_prstatus"
+#define XEN_DUMPCORE_SEC_SHARED_INFO            ".xen_shared_info"
+#define XEN_DUMPCORE_SEC_P2M                    ".xen_p2m"
+#define XEN_DUMPCORE_SEC_PFN                    ".xen_pfn"
+#define XEN_DUMPCORE_SEC_PAGES                  ".xen_pages"
+
+/* elf note name */
+#define XEN_DUMPCORE_ELFNOTE_NAME               "Xen"
+/* note numbers are defined in xen/elfnote.h */
+
+struct elfnote {
+    uint32_t    namesz; /* Elf_Note note; */
+    uint32_t    descsz;
+    uint32_t    type;
+    char        name[4]; /* sizeof("Xen") = 4
+                          * Fortunately this is 64-bit aligned so that
+                          * we can use the same structure for both 32/64-bit
+                          */
+};
+
+struct xen_dumpcore_elfnote_none_desc {
+    /* nothing */
+};
+
+struct xen_dumpcore_elfnote_header_desc {
+    uint64_t    xch_magic;
+    uint64_t    xch_nr_vcpus;
+    uint64_t    xch_nr_pages;
+    uint64_t    xch_page_size;
+};
+
+struct xen_dumpcore_elfnote_xen_version_desc {
+    uint64_t                    major_version;
+    uint64_t                    minor_version;
+    xen_extraversion_t          extra_version;
+    xen_compile_info_t          compile_info;
+    xen_capabilities_info_t     capabilities;
+    xen_changeset_info_t        changeset;
+    xen_platform_parameters_t   platform_parameters;
+    uint64_t                    pagesize;
+};
+
+#define XEN_DUMPCORE_FORMAT_VERSION(major, minor)  \
+    ((major) << 32) | ((minor) & 0xffffffff)
+#define XEN_DUMPCORE_FORMAT_MAJOR(version)      ((major) >> 32)
+#define XEN_DUMPCORE_FORMAT_MINOR(version)      ((minor) & 0xffffffff)
+
+#define XEN_DUMPCORE_FORMAT_MAJOR_CURRENT       ((uint64_t)0)
+#define XEN_DUMPCORE_FORMAT_MINOR_CURRENT       ((uint64_t)1)
+#define XEN_DUMPCORE_FORMAT_VERSION_CURRENT                         \
+    XEN_DUMPCORE_FORMAT_VERSION(XEN_DUMPCORE_FORMAT_MAJOR_CURRENT,  \
+                                XEN_DUMPCORE_FORMAT_MINOR_CURRENT)
+
+struct xen_dumpcore_elfnote_format_version_desc {
+    uint64_t    version;
+};
+
+
+struct xen_dumpcore_elfnote_none {
+    struct elfnote                              elfnote;
+    struct xen_dumpcore_elfnote_none_desc       none;
+};
+
+struct xen_dumpcore_elfnote_header {
+    struct elfnote                              elfnote;
+    struct xen_dumpcore_elfnote_header_desc     header;
+};
+
+struct xen_dumpcore_elfnote_xen_version {
+    struct elfnote                                     elfnote;
+    struct xen_dumpcore_elfnote_xen_version_desc        xen_version;
+};
+
+struct xen_dumpcore_elfnote_format_version {
+    struct elfnote                                      elfnote;
+    struct xen_dumpcore_elfnote_format_version_desc     format_version;
+};
+
+#define XC_CORE_INVALID_PFN     (~(uint64_t)0)
+#define XC_CORE_INVALID_GMFN    (~(uint64_t)0)
+struct xen_dumpcore_p2m {
+    uint64_t    pfn;
+    uint64_t    gmfn;
+};
+
+
+struct xc_core_strtab;
+struct xc_core_section_headers;
+
+Elf64_Shdr*
+xc_core_shdr_get(xc_interface *xch,
+                 struct xc_core_section_headers *sheaders);
+int
+xc_core_shdr_set(xc_interface *xch,
+                 Elf64_Shdr *shdr,
+                 struct xc_core_strtab *strtab,
+                 const char *name, uint32_t type,
+                 uint64_t offset, uint64_t size,
+                 uint64_t addralign, uint64_t entsize);
+
+struct xc_core_memory_map {
+    uint64_t    addr;
+    uint64_t    size;
+};
+typedef struct xc_core_memory_map xc_core_memory_map_t;
+int xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info);
+struct xc_core_arch_context;
+int xc_core_arch_memory_map_get(xc_interface *xch,
+                                struct xc_core_arch_context *arch_ctxt,
+                                xc_dominfo_t *info, shared_info_any_t *live_shinfo,
+                                xc_core_memory_map_t **mapp,
+                                unsigned int *nr_entries);
+int xc_core_arch_map_p2m(xc_interface *xch, unsigned int guest_width,
+                         xc_dominfo_t *info, shared_info_any_t *live_shinfo,
+                         xen_pfn_t **live_p2m, unsigned long *pfnp);
+
+int xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width,
+                                  xc_dominfo_t *info,
+                                  shared_info_any_t *live_shinfo,
+                                  xen_pfn_t **live_p2m, unsigned long *pfnp);
+
+int xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
+                                  xen_pfn_t *gpfn);
+
+
+#if defined (__i386__) || defined (__x86_64__)
+# include "xc_core_x86.h"
+#elif defined (__arm__) || defined(__aarch64__)
+# include "xc_core_arm.h"
+#else
+# error "unsupported architecture"
+#endif
+
+#ifndef ELF_CORE_EFLAGS
+# define ELF_CORE_EFLAGS 0
+#endif
+
+#endif /* XC_CORE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
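As a small worked example of the format-version encoding above (a sketch under the header's own definitions; the helper name is invented for illustration): major and minor are packed into a single 64-bit value, major in the upper 32 bits, and the current format is major 0, minor 1.

    /* Illustrative sketch -- not part of this patch. */
    #include <stdint.h>
    #include <stdio.h>

    /* Decode the version field of a xen_dumpcore_elfnote_format_version_desc,
     * mirroring XEN_DUMPCORE_FORMAT_MAJOR/MINOR: major in bits 63..32,
     * minor in bits 31..0. */
    static void print_dumpcore_format(uint64_t version)
    {
        printf("dumpcore format %llu.%llu\n",
               (unsigned long long)(version >> 32),
               (unsigned long long)(version & 0xffffffff));
    }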
diff --git a/tools/libs/ctrl/xc_core_arm.c b/tools/libs/ctrl/xc_core_arm.c
new file mode 100644 (file)
index 0000000..7b587b4
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2011 Citrix Systems
+ *
+ */
+
+#include "xc_private.h"
+#include "xc_core.h"
+
+#include <xen-tools/libs.h>
+
+int
+xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
+                              unsigned long pfn)
+{
+    /* TODO: memory from DT */
+    if (pfn >= 0x80000 && pfn < 0x88000)
+        return 1;
+    return 0;
+}
+
+int
+xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
+{
+    return 1;
+}
+
+int
+xc_core_arch_memory_map_get(xc_interface *xch, struct xc_core_arch_context *unused,
+                            xc_dominfo_t *info, shared_info_any_t *live_shinfo,
+                            xc_core_memory_map_t **mapp,
+                            unsigned int *nr_entries)
+{
+    xen_pfn_t p2m_size = 0;
+    xc_core_memory_map_t *map;
+
+    if ( xc_domain_nr_gpfns(xch, info->domid, &p2m_size) < 0 )
+        return -1;
+
+    map = malloc(sizeof(*map));
+    if ( map == NULL )
+    {
+        PERROR("Could not allocate memory");
+        return -1;
+    }
+
+    map->addr = 0;
+    map->size = ((uint64_t)p2m_size) << PAGE_SHIFT;
+
+    *mapp = map;
+    *nr_entries = 1;
+    return 0;
+}
+
+static int
+xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc_dominfo_t *info,
+                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+                        unsigned long *pfnp, int rw)
+{
+    errno = ENOSYS;
+    return -1;
+}
+
+int
+xc_core_arch_map_p2m(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
+                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+                        unsigned long *pfnp)
+{
+    struct domain_info_context _dinfo = { .guest_width = guest_width };
+    struct domain_info_context *dinfo = &_dinfo;
+    return xc_core_arch_map_p2m_rw(xch, dinfo, info,
+                                   live_shinfo, live_p2m, pfnp, 0);
+}
+
+int
+xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
+                              shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+                              unsigned long *pfnp)
+{
+    struct domain_info_context _dinfo = { .guest_width = guest_width };
+    struct domain_info_context *dinfo = &_dinfo;
+    return xc_core_arch_map_p2m_rw(xch, dinfo, info,
+                                   live_shinfo, live_p2m, pfnp, 1);
+}
+
+int
+xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
+                              xen_pfn_t *gpfn)
+{
+    /*
+     * The grant table region is not used until the guest boots.
+     * Use its first page for the scratch pfn.
+     */
+    BUILD_BUG_ON(GUEST_GNTTAB_SIZE < XC_PAGE_SIZE);
+
+    *gpfn = GUEST_GNTTAB_BASE >> XC_PAGE_SHIFT;
+
+    return 0;
+}
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_core_arm.h b/tools/libs/ctrl/xc_core_arm.h
new file mode 100644 (file)
index 0000000..162f7a7
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2012 Citrix Systems
+ *
+ */
+
+#ifndef XC_CORE_ARM_H
+#define XC_CORE_ARM_H
+
+#define ELF_ARCH_DATA           ELFDATA2LSB
+#define ELF_ARCH_MACHINE        EM_ARM
+
+struct xc_core_arch_context {
+    /* nothing */
+};
+
+#define xc_core_arch_context_init(arch_ctxt)            do {} while (0)
+#define xc_core_arch_context_free(arch_ctxt)            do {} while (0)
+#define xc_core_arch_context_get(arch_ctxt, ctxt, xch, domid) \
+                                                                (0)
+#define xc_core_arch_context_dump(xch, arch_ctxt, args, dump_rtn)    (0)
+
+int
+xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
+                              unsigned long pfn);
+static inline int
+xc_core_arch_context_get_shdr(xc_interface *xch,
+                              struct xc_core_arch_context *arch_ctxt, 
+                              struct xc_core_section_headers *sheaders,
+                              struct xc_core_strtab *strtab,
+                              uint64_t *filesz, uint64_t offset)
+{
+    *filesz = 0;
+    return 0;
+}
+
+#endif /* XC_CORE_ARM_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_core_x86.c b/tools/libs/ctrl/xc_core_x86.c
new file mode 100644 (file)
index 0000000..cb76e62
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ */
+
+#include "xc_private.h"
+#include "xc_core.h"
+#include <xen/hvm/e820.h>
+
+int
+xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
+                              unsigned long pfn)
+{
+    if ((pfn >= 0xa0 && pfn < 0xc0) /* VGA hole */
+        || (pfn >= (HVM_BELOW_4G_MMIO_START >> PAGE_SHIFT)
+            && pfn < (1ULL<<32) >> PAGE_SHIFT)) /* MMIO */
+        return 0;
+    return 1;
+}
+
+int
+xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
+{
+    return info->hvm;
+}
+
+int
+xc_core_arch_memory_map_get(xc_interface *xch, struct xc_core_arch_context *unused,
+                            xc_dominfo_t *info, shared_info_any_t *live_shinfo,
+                            xc_core_memory_map_t **mapp,
+                            unsigned int *nr_entries)
+{
+    xen_pfn_t p2m_size = 0;
+    xc_core_memory_map_t *map;
+
+    if ( xc_domain_nr_gpfns(xch, info->domid, &p2m_size) < 0 )
+        return -1;
+
+    map = malloc(sizeof(*map));
+    if ( map == NULL )
+    {
+        PERROR("Could not allocate memory");
+        return -1;
+    }
+
+    map->addr = 0;
+    map->size = ((uint64_t)p2m_size) << PAGE_SHIFT;
+
+    *mapp = map;
+    *nr_entries = 1;
+    return 0;
+}
+
+static int
+xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc_dominfo_t *info,
+                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+                        unsigned long *pfnp, int rw)
+{
+    /* Double and single indirect references to the live P2M table */
+    xen_pfn_t *live_p2m_frame_list_list = NULL;
+    xen_pfn_t *live_p2m_frame_list = NULL;
+    /* Copies of the above. */
+    xen_pfn_t *p2m_frame_list_list = NULL;
+    xen_pfn_t *p2m_frame_list = NULL;
+
+    uint32_t dom = info->domid;
+    int ret = -1;
+    int err;
+    int i;
+
+    if ( xc_domain_nr_gpfns(xch, info->domid, &dinfo->p2m_size) < 0 )
+    {
+        ERROR("Could not get maximum GPFN!");
+        goto out;
+    }
+
+    if ( dinfo->p2m_size < info->nr_pages  )
+    {
+        ERROR("p2m_size < nr_pages -1 (%lx < %lx", dinfo->p2m_size, info->nr_pages - 1);
+        goto out;
+    }
+
+    live_p2m_frame_list_list =
+        xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_READ,
+                             GET_FIELD(live_shinfo, arch.pfn_to_mfn_frame_list_list, dinfo->guest_width));
+
+    if ( !live_p2m_frame_list_list )
+    {
+        PERROR("Couldn't map p2m_frame_list_list (errno %d)", errno);
+        goto out;
+    }
+
+    /* Get a local copy of the live_P2M_frame_list_list */
+    if ( !(p2m_frame_list_list = malloc(PAGE_SIZE)) )
+    {
+        ERROR("Couldn't allocate p2m_frame_list_list array");
+        goto out;
+    }
+    memcpy(p2m_frame_list_list, live_p2m_frame_list_list, PAGE_SIZE);
+
+    /* Canonicalize guest's unsigned long vs ours */
+    if ( dinfo->guest_width > sizeof(unsigned long) )
+        for ( i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++ )
+            if ( i < PAGE_SIZE/dinfo->guest_width )
+                p2m_frame_list_list[i] = ((uint64_t *)p2m_frame_list_list)[i];
+            else
+                p2m_frame_list_list[i] = 0;
+    else if ( dinfo->guest_width < sizeof(unsigned long) )
+        for ( i = PAGE_SIZE/sizeof(unsigned long) - 1; i >= 0; i-- )
+            p2m_frame_list_list[i] = ((uint32_t *)p2m_frame_list_list)[i];
+
+    live_p2m_frame_list =
+        xc_map_foreign_pages(xch, dom, PROT_READ,
+                             p2m_frame_list_list,
+                             P2M_FLL_ENTRIES);
+
+    if ( !live_p2m_frame_list )
+    {
+        PERROR("Couldn't map p2m_frame_list");
+        goto out;
+    }
+
+    /* Get a local copy of the live_P2M_frame_list */
+    if ( !(p2m_frame_list = malloc(P2M_TOOLS_FL_SIZE)) )
+    {
+        ERROR("Couldn't allocate p2m_frame_list array");
+        goto out;
+    }
+    memset(p2m_frame_list, 0, P2M_TOOLS_FL_SIZE);
+    memcpy(p2m_frame_list, live_p2m_frame_list, P2M_GUEST_FL_SIZE);
+
+    /* Canonicalize guest's unsigned long vs ours */
+    if ( dinfo->guest_width > sizeof(unsigned long) )
+        for ( i = 0; i < P2M_FL_ENTRIES; i++ )
+            p2m_frame_list[i] = ((uint64_t *)p2m_frame_list)[i];
+    else if ( dinfo->guest_width < sizeof(unsigned long) )
+        for ( i = P2M_FL_ENTRIES - 1; i >= 0; i-- )
+            p2m_frame_list[i] = ((uint32_t *)p2m_frame_list)[i];
+
+    *live_p2m = xc_map_foreign_pages(xch, dom,
+                                    rw ? (PROT_READ | PROT_WRITE) : PROT_READ,
+                                    p2m_frame_list,
+                                    P2M_FL_ENTRIES);
+
+    if ( !*live_p2m )
+    {
+        PERROR("Couldn't map p2m table");
+        goto out;
+    }
+
+    *pfnp = dinfo->p2m_size;
+
+    ret = 0;
+
+out:
+    err = errno;
+
+    if ( live_p2m_frame_list_list )
+        munmap(live_p2m_frame_list_list, PAGE_SIZE);
+
+    if ( live_p2m_frame_list )
+        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
+
+    free(p2m_frame_list_list);
+
+    free(p2m_frame_list);
+
+    errno = err;
+    return ret;
+}
+
+int
+xc_core_arch_map_p2m(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
+                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+                        unsigned long *pfnp)
+{
+    struct domain_info_context _dinfo = { .guest_width = guest_width };
+    struct domain_info_context *dinfo = &_dinfo;
+    return xc_core_arch_map_p2m_rw(xch, dinfo, info,
+                                   live_shinfo, live_p2m, pfnp, 0);
+}
+
+int
+xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
+                              shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
+                              unsigned long *pfnp)
+{
+    struct domain_info_context _dinfo = { .guest_width = guest_width };
+    struct domain_info_context *dinfo = &_dinfo;
+    return xc_core_arch_map_p2m_rw(xch, dinfo, info,
+                                   live_shinfo, live_p2m, pfnp, 1);
+}
+
+int
+xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
+                              xen_pfn_t *gpfn)
+{
+    return xc_domain_nr_gpfns(xch, domid, gpfn);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
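The backwards canonicalisation loops above are easy to misread, so here is a sketch (illustration only, with an invented helper name) of the in-place widening they perform when the guest's unsigned long is narrower than the toolstack's; iterating from the end ensures no 32-bit entry is overwritten before it has been read.

    /* Illustrative sketch -- not part of this patch. */
    #include <stddef.h>
    #include <stdint.h>

    /* Widen nr 32-bit guest entries, packed at the start of buf, into
     * nr host unsigned longs occupying the same buffer. */
    static void widen_pfn_entries(unsigned long *buf, size_t nr)
    {
        size_t i;

        for ( i = nr; i-- > 0; )
            buf[i] = ((uint32_t *)buf)[i];
    }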
diff --git a/tools/libs/ctrl/xc_core_x86.h b/tools/libs/ctrl/xc_core_x86.h
new file mode 100644 (file)
index 0000000..867146b
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ */
+
+#ifndef XC_CORE_X86_H
+#define XC_CORE_X86_H
+
+#define ELF_ARCH_DATA           ELFDATA2LSB
+#define ELF_ARCH_MACHINE       (dinfo->guest_width == 8 ? EM_X86_64 : EM_386)
+
+struct xc_core_arch_context {
+    /* nothing */
+};
+
+#define xc_core_arch_context_init(arch_ctxt)            do {} while (0)
+#define xc_core_arch_context_free(arch_ctxt)            do {} while (0)
+#define xc_core_arch_context_get(arch_ctxt, ctxt, xch, domid) \
+                                                                (0)
+#define xc_core_arch_context_dump(xch, arch_ctxt, args, dump_rtn)    (0)
+
+int
+xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
+                              unsigned long pfn);
+static inline int
+xc_core_arch_context_get_shdr(xc_interface *xch,
+                              struct xc_core_arch_context *arch_ctxt, 
+                              struct xc_core_section_headers *sheaders,
+                              struct xc_core_strtab *strtab,
+                              uint64_t *filesz, uint64_t offset)
+{
+    *filesz = 0;
+    return 0;
+}
+
+#endif /* XC_CORE_X86_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_cpu_hotplug.c b/tools/libs/ctrl/xc_cpu_hotplug.c
new file mode 100644 (file)
index 0000000..2ea9825
--- /dev/null
@@ -0,0 +1,74 @@
+/******************************************************************************
+ * xc_cpu_hotplug.c - Libxc API for Xen Physical CPU hotplug Management
+ *
+ * Copyright (c) 2008, Shan Haitao <haitao.shan@intel.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "xc_private.h"
+
+int xc_cpu_online(xc_interface *xch, int cpu)
+{
+    DECLARE_SYSCTL;
+    int ret;
+
+    sysctl.cmd = XEN_SYSCTL_cpu_hotplug;
+    sysctl.u.cpu_hotplug.cpu = cpu;
+    sysctl.u.cpu_hotplug.op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
+    ret = xc_sysctl(xch, &sysctl);
+
+    return ret;
+}
+
+int xc_cpu_offline(xc_interface *xch, int cpu)
+{
+    DECLARE_SYSCTL;
+    int ret;
+
+    sysctl.cmd = XEN_SYSCTL_cpu_hotplug;
+    sysctl.u.cpu_hotplug.cpu = cpu;
+    sysctl.u.cpu_hotplug.op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
+    ret = xc_sysctl(xch, &sysctl);
+
+    return ret;
+}
+
+int xc_smt_enable(xc_interface *xch)
+{
+    DECLARE_SYSCTL;
+    int ret;
+
+    sysctl.cmd = XEN_SYSCTL_cpu_hotplug;
+    sysctl.u.cpu_hotplug.cpu = 0;
+    sysctl.u.cpu_hotplug.op = XEN_SYSCTL_CPU_HOTPLUG_SMT_ENABLE;
+    ret = xc_sysctl(xch, &sysctl);
+
+    return ret;
+}
+
+int xc_smt_disable(xc_interface *xch)
+{
+    DECLARE_SYSCTL;
+    int ret;
+
+    sysctl.cmd = XEN_SYSCTL_cpu_hotplug;
+    sysctl.u.cpu_hotplug.cpu = 0;
+    sysctl.u.cpu_hotplug.op = XEN_SYSCTL_CPU_HOTPLUG_SMT_DISABLE;
+    ret = xc_sysctl(xch, &sysctl);
+
+    return ret;
+}
+
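A minimal usage sketch for the hotplug helpers above; it is illustrative only and assumes an already-open, sufficiently privileged xc_interface handle.

    /* Illustrative sketch -- not part of this patch. */
    #include <xenctrl.h>

    /* Take a physical CPU offline, then bring it back online. */
    static int cycle_cpu(xc_interface *xch, int cpu)
    {
        int rc = xc_cpu_offline(xch, cpu);

        if ( rc )
            return rc;

        return xc_cpu_online(xch, cpu);
    }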
diff --git a/tools/libs/ctrl/xc_cpupool.c b/tools/libs/ctrl/xc_cpupool.c
new file mode 100644 (file)
index 0000000..fbd8cc9
--- /dev/null
@@ -0,0 +1,219 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include <unistd.h>
+#include "xc_private.h"
+
+static int do_sysctl_save(xc_interface *xch, struct xen_sysctl *sysctl)
+{
+    int ret;
+
+    do {
+        ret = do_sysctl(xch, sysctl);
+    } while ( (ret < 0) && (errno == EAGAIN) );
+
+    return ret;
+}
+
+int xc_cpupool_create(xc_interface *xch,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_CREATE;
+    sysctl.u.cpupool_op.cpupool_id = (*ppoolid == XC_CPUPOOL_POOLID_ANY) ?
+        XEN_SYSCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    sysctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_sysctl_save(xch, &sysctl)) != 0 )
+        return err;
+
+    *ppoolid = sysctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+int xc_cpupool_destroy(xc_interface *xch,
+                       uint32_t poolid)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_DESTROY;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    return do_sysctl_save(xch, &sysctl);
+}
+
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, 
+                       uint32_t poolid)
+{
+    int err = 0;
+    xc_cpupoolinfo_t *info = NULL;
+    int local_size;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+
+    local_size = xc_get_cpumap_size(xch);
+    if (local_size <= 0)
+    {
+        PERROR("Could not get number of cpus");
+        return NULL;
+    }
+
+    local = xc_hypercall_buffer_alloc(xch, local, local_size);
+    if ( local == NULL ) {
+        PERROR("Could not allocate locked memory for xc_cpupool_getinfo");
+        return NULL;
+    }
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_bits = local_size * 8;
+
+    err = do_sysctl_save(xch, &sysctl);
+
+    if ( err < 0 )
+       goto out;
+
+    info = calloc(1, sizeof(xc_cpupoolinfo_t));
+    if ( !info )
+       goto out;
+
+    info->cpumap = xc_cpumap_alloc(xch);
+    if (!info->cpumap) {
+        free(info);
+        info = NULL;
+        goto out;
+    }
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    memcpy(info->cpumap, local, local_size);
+
+out:
+    xc_hypercall_buffer_free(xch, local);
+
+    return info;
+}
+
+void xc_cpupool_infofree(xc_interface *xch,
+                         xc_cpupoolinfo_t *info)
+{
+    free(info->cpumap);
+    free(info);
+}
+
+int xc_cpupool_addcpu(xc_interface *xch,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_ADDCPU;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_sysctl_save(xch, &sysctl);
+}
+
+/*
+ * The hypervisor may return EADDRINUSE when trying to remove a cpu from a
+ * cpupool while a domain running in that cpupool has temporarily pinned a
+ * vcpu.  Retry a few times in that case, as the situation may resolve
+ * itself.
+ */
+#define NUM_RMCPU_BUSY_RETRIES 5
+
+int xc_cpupool_removecpu(xc_interface *xch,
+                         uint32_t poolid,
+                         int cpu)
+{
+    unsigned retries;
+    int err = 0;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_RMCPU;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
+    for ( retries = 0; retries < NUM_RMCPU_BUSY_RETRIES; retries++ ) {
+        err = do_sysctl_save(xch, &sysctl);
+        if ( err == 0 || errno != EADDRINUSE )
+            break;
+    }
+    return err;
+}
+
+int xc_cpupool_movedomain(xc_interface *xch,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    sysctl.u.cpupool_op.domid = domid;
+    return do_sysctl_save(xch, &sysctl);
+}
+
+xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch)
+{
+    int err = -1;
+    xc_cpumap_t cpumap = NULL;
+    int mapsize;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+
+    mapsize = xc_get_cpumap_size(xch);
+    if (mapsize <= 0)
+        return NULL;
+
+    local = xc_hypercall_buffer_alloc(xch, local, mapsize);
+    if ( local == NULL ) {
+        PERROR("Could not allocate locked memory for xc_cpupool_freeinfo");
+        return NULL;
+    }
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_bits = mapsize * 8;
+
+    err = do_sysctl_save(xch, &sysctl);
+
+    if ( err < 0 )
+        goto out;
+
+    cpumap = xc_cpumap_alloc(xch);
+    if (cpumap == NULL)
+        goto out;
+
+    memcpy(cpumap, local, mapsize);
+
+out:
+    xc_hypercall_buffer_free(xch, local);
+    return cpumap;
+}
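A usage sketch for the cpupool calls above (illustration only, with abbreviated error handling); XC_CPUPOOL_POOLID_ANY and XEN_SCHEDULER_CREDIT are the constants already used elsewhere in this library.

    /* Illustrative sketch -- not part of this patch. */
    #include <xenctrl.h>

    /* Create a credit-scheduled cpupool, give it one CPU and move a domain in. */
    static int setup_pool(xc_interface *xch, uint32_t domid, int cpu)
    {
        uint32_t poolid = XC_CPUPOOL_POOLID_ANY;
        int rc = xc_cpupool_create(xch, &poolid, XEN_SCHEDULER_CREDIT);

        if ( rc )
            return rc;

        rc = xc_cpupool_addcpu(xch, poolid, cpu);
        if ( !rc )
            rc = xc_cpupool_movedomain(xch, poolid, domid);

        return rc;
    }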
diff --git a/tools/libs/ctrl/xc_csched.c b/tools/libs/ctrl/xc_csched.c
new file mode 100644 (file)
index 0000000..8e8c672
--- /dev/null
@@ -0,0 +1,109 @@
+/****************************************************************************
+ * (C) 2006 - Emmanuel Ackaouy - XenSource Inc.
+ ****************************************************************************
+ *
+ *        File: xc_csched.c
+ *      Author: Emmanuel Ackaouy
+ *
+ * Description: XC Interface to the credit scheduler
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+int
+xc_sched_credit_domain_set(
+    xc_interface *xch,
+    uint32_t domid,
+    struct xen_domctl_sched_credit *sdom)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
+    domctl.u.scheduler_op.u.credit = *sdom;
+
+    if ( do_domctl(xch, &domctl) )
+        return -1;
+
+    return 0;
+}
+
+int
+xc_sched_credit_domain_get(
+    xc_interface *xch,
+    uint32_t domid,
+    struct xen_domctl_sched_credit *sdom)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
+
+    if ( do_domctl(xch, &domctl) )
+        return -1;
+
+    *sdom = domctl.u.scheduler_op.u.credit;
+
+    return 0;
+}
+
+int
+xc_sched_credit_params_set(
+    xc_interface *xch,
+    uint32_t cpupool_id,
+    struct xen_sysctl_credit_schedule *schedule)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_scheduler_op;
+    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
+    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
+    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
+
+    sysctl.u.scheduler_op.u.sched_credit = *schedule;
+
+    if ( do_sysctl(xch, &sysctl) )
+        return -1;
+
+    *schedule = sysctl.u.scheduler_op.u.sched_credit;
+
+    return 0;
+}
+
+int
+xc_sched_credit_params_get(
+    xc_interface *xch,
+    uint32_t cpupool_id,
+    struct xen_sysctl_credit_schedule *schedule)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_scheduler_op;
+    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
+    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
+    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_getinfo;
+
+    if ( do_sysctl(xch, &sysctl) )
+        return -1;
+
+    *schedule = sysctl.u.scheduler_op.u.sched_credit;
+
+    return 0;
+}
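A sketch of how the credit-scheduler accessors above pair up (illustration only; the weight field belongs to struct xen_domctl_sched_credit in the Xen public headers, which are not part of this patch).

    /* Illustrative sketch -- not part of this patch. */
    #include <xenctrl.h>

    /* Read a domain's credit-scheduler parameters, double its weight and
     * write them back. */
    static int double_weight(xc_interface *xch, uint32_t domid)
    {
        struct xen_domctl_sched_credit sdom;

        if ( xc_sched_credit_domain_get(xch, domid, &sdom) )
            return -1;

        sdom.weight *= 2;

        return xc_sched_credit_domain_set(xch, domid, &sdom);
    }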
diff --git a/tools/libs/ctrl/xc_csched2.c b/tools/libs/ctrl/xc_csched2.c
new file mode 100644 (file)
index 0000000..5eb753a
--- /dev/null
@@ -0,0 +1,109 @@
+/****************************************************************************
+ * (C) 2006 - Emmanuel Ackaouy - XenSource Inc.
+ ****************************************************************************
+ *
+ *        File: xc_csched2.c
+ *      Author: Emmanuel Ackaouy
+ *
+ * Description: XC Interface to the credit2 scheduler
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+int
+xc_sched_credit2_domain_set(
+    xc_interface *xch,
+    uint32_t domid,
+    struct xen_domctl_sched_credit2 *sdom)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
+    domctl.u.scheduler_op.u.credit2 = *sdom;
+
+    if ( do_domctl(xch, &domctl) )
+        return -1;
+
+    return 0;
+}
+
+int
+xc_sched_credit2_domain_get(
+    xc_interface *xch,
+    uint32_t domid,
+    struct xen_domctl_sched_credit2 *sdom)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
+
+    if ( do_domctl(xch, &domctl) )
+        return -1;
+
+    *sdom = domctl.u.scheduler_op.u.credit2;
+
+    return 0;
+}
+
+int
+xc_sched_credit2_params_set(
+    xc_interface *xch,
+    uint32_t cpupool_id,
+    struct xen_sysctl_credit2_schedule *schedule)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_scheduler_op;
+    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
+    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
+    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
+
+    sysctl.u.scheduler_op.u.sched_credit2 = *schedule;
+
+    if ( do_sysctl(xch, &sysctl) )
+        return -1;
+
+    *schedule = sysctl.u.scheduler_op.u.sched_credit2;
+
+    return 0;
+}
+
+int
+xc_sched_credit2_params_get(
+    xc_interface *xch,
+    uint32_t cpupool_id,
+    struct xen_sysctl_credit2_schedule *schedule)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_scheduler_op;
+    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
+    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
+    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_getinfo;
+
+    if ( do_sysctl(xch, &sysctl) )
+        return -1;
+
+    *schedule = sysctl.u.scheduler_op.u.sched_credit2;
+
+    return 0;
+}
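A similar sketch for the per-cpupool credit2 parameters, assuming xen_sysctl_credit2_schedule carries a ratelimit_us field as in the public sysctl interface (cpupool 0 and the 2000us value are placeholders):

    struct xen_sysctl_credit2_schedule sched;

    if ( xc_sched_credit2_params_get(xch, 0, &sched) )
        return -1;
    sched.ratelimit_us = 2000;
    if ( xc_sched_credit2_params_set(xch, 0, &sched) )
        return -1;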
diff --git a/tools/libs/ctrl/xc_devicemodel_compat.c b/tools/libs/ctrl/xc_devicemodel_compat.c
new file mode 100644 (file)
index 0000000..a46011c
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * Compat shims for third-party consumers of the libxenctrl device model
+ * functionality, which has been split out into separate libraries.
+ */
+
+#define XC_WANT_COMPAT_DEVICEMODEL_API
+#include "xc_private.h"
+
+int xc_hvm_create_ioreq_server(
+    xc_interface *xch, uint32_t domid, int handle_bufioreq,
+    ioservid_t *id)
+{
+    return xendevicemodel_create_ioreq_server(xch->dmod, domid,
+                                              handle_bufioreq, id);
+}
+
+int xc_hvm_get_ioreq_server_info(
+    xc_interface *xch, uint32_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
+    xen_pfn_t *bufioreq_pfn, evtchn_port_t *bufioreq_port)
+{
+    return xendevicemodel_get_ioreq_server_info(xch->dmod, domid, id,
+                                                ioreq_pfn, bufioreq_pfn,
+                                                bufioreq_port);
+}
+
+int xc_hvm_map_io_range_to_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
+    uint64_t start, uint64_t end)
+{
+    return xendevicemodel_map_io_range_to_ioreq_server(xch->dmod, domid,
+                                                       id, is_mmio, start,
+                                                       end);
+}
+
+int xc_hvm_unmap_io_range_from_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
+    uint64_t start, uint64_t end)
+{
+    return xendevicemodel_unmap_io_range_from_ioreq_server(xch->dmod, domid,
+                                                           id, is_mmio,
+                                                           start, end);
+}
+
+int xc_hvm_map_pcidev_to_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
+    uint8_t bus, uint8_t device, uint8_t function)
+{
+    return xendevicemodel_map_pcidev_to_ioreq_server(xch->dmod, domid, id,
+                                                     segment, bus, device,
+                                                     function);
+}
+
+int xc_hvm_unmap_pcidev_from_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
+    uint8_t bus, uint8_t device, uint8_t function)
+{
+    return xendevicemodel_unmap_pcidev_from_ioreq_server(xch->dmod, domid,
+                                                         id, segment, bus,
+                                                         device, function);
+}
+
+int xc_hvm_destroy_ioreq_server(
+    xc_interface *xch, uint32_t domid, ioservid_t id)
+{
+    return xendevicemodel_destroy_ioreq_server(xch->dmod, domid, id);
+}
+
+int xc_hvm_set_ioreq_server_state(
+    xc_interface *xch, uint32_t domid, ioservid_t id, int enabled)
+{
+    return xendevicemodel_set_ioreq_server_state(xch->dmod, domid, id,
+                                                 enabled);
+}
+
+int xc_hvm_set_pci_intx_level(
+    xc_interface *xch, uint32_t domid, uint16_t segment, uint8_t bus,
+    uint8_t device, uint8_t intx, unsigned int level)
+{
+    return xendevicemodel_set_pci_intx_level(xch->dmod, domid, segment,
+                                             bus, device, intx, level);
+}
+
+int xc_hvm_set_isa_irq_level(
+    xc_interface *xch, uint32_t domid, uint8_t irq, unsigned int level)
+{
+    return xendevicemodel_set_isa_irq_level(xch->dmod, domid, irq, level);
+}
+
+int xc_hvm_set_pci_link_route(
+    xc_interface *xch, uint32_t domid, uint8_t link, uint8_t irq)
+{
+    return xendevicemodel_set_pci_link_route(xch->dmod, domid, link, irq);
+}
+
+int xc_hvm_inject_msi(
+    xc_interface *xch, uint32_t domid, uint64_t msi_addr, uint32_t msi_data)
+{
+    return xendevicemodel_inject_msi(xch->dmod, domid, msi_addr, msi_data);
+}
+
+int xc_hvm_track_dirty_vram(
+    xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr,
+    unsigned long *dirty_bitmap)
+{
+    return xendevicemodel_track_dirty_vram(xch->dmod, domid, first_pfn,
+                                           nr, dirty_bitmap);
+}
+
+int xc_hvm_modified_memory(
+    xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr)
+{
+    return xendevicemodel_modified_memory(xch->dmod, domid, first_pfn, nr);
+}
+
+int xc_hvm_set_mem_type(
+    xc_interface *xch, uint32_t domid, hvmmem_type_t type,
+    uint64_t first_pfn, uint32_t nr)
+{
+    return xendevicemodel_set_mem_type(xch->dmod, domid, type, first_pfn,
+                                       nr);
+}
+
+int xc_hvm_inject_trap(
+    xc_interface *xch, uint32_t domid, int vcpu, uint8_t vector,
+    uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2)
+{
+    return xendevicemodel_inject_event(xch->dmod, domid, vcpu, vector,
+                                       type, error_code, insn_len, cr2);
+}
+
+int xc_domain_pin_memory_cacheattr(
+    xc_interface *xch, uint32_t domid, uint64_t start, uint64_t end,
+    uint32_t type)
+{
+    return xendevicemodel_pin_memory_cacheattr(xch->dmod, domid, start, end,
+                                               type);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
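An illustrative sketch of how an out-of-tree device model keeps building against these shims: it opts into the compat declarations before including xenctrl.h and carries on calling the legacy xc_hvm_* names (the xch handle, domid and IRQ values below are placeholders):

    #define XC_WANT_COMPAT_DEVICEMODEL_API
    #include <xenctrl.h>

    /* Assert, then deassert, a guest ISA IRQ through the compat shim. */
    xc_hvm_set_isa_irq_level(xch, domid, 8, 1);
    xc_hvm_set_isa_irq_level(xch, domid, 8, 0);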
diff --git a/tools/libs/ctrl/xc_domain.c b/tools/libs/ctrl/xc_domain.c
new file mode 100644 (file)
index 0000000..43fab50
--- /dev/null
@@ -0,0 +1,2205 @@
+/******************************************************************************
+ * xc_domain.c
+ *
+ * API for manipulating and obtaining information on domains.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2003, K A Fraser.
+ */
+
+#include "xc_private.h"
+#include "xc_core.h"
+#include "xc_private.h"
+#include <xen/memory.h>
+#include <xen/hvm/hvm_op.h>
+
+int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
+                     struct xen_domctl_createdomain *config)
+{
+    int err;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_createdomain;
+    domctl.domain = *pdomid;
+    domctl.u.createdomain = *config;
+
+    if ( (err = do_domctl(xch, &domctl)) != 0 )
+        return err;
+
+    *pdomid = (uint16_t)domctl.domain;
+    *config = domctl.u.createdomain;
+
+    return 0;
+}
+
+int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
+                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns)
+{
+#if defined (__i386__) || defined (__x86_64__)
+    /*
+     * The x86 architecture provides cache coherency guarantees which prevent
+     * the need for this hypercall.  Avoid the overhead of making a hypercall
+     * just for Xen to return -ENOSYS.  It is safe to ignore this call on x86
+     * so we just return 0.
+     */
+    return 0;
+#else
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_cacheflush;
+    domctl.domain = domid;
+    domctl.u.cacheflush.start_pfn = start_pfn;
+    domctl.u.cacheflush.nr_pfns = nr_pfns;
+    return do_domctl(xch, &domctl);
+#endif
+}
+
+int xc_domain_pause(xc_interface *xch,
+                    uint32_t domid)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_pausedomain;
+    domctl.domain = domid;
+    return do_domctl(xch, &domctl);
+}
+
+
+int xc_domain_unpause(xc_interface *xch,
+                      uint32_t domid)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_unpausedomain;
+    domctl.domain = domid;
+    return do_domctl(xch, &domctl);
+}
+
+
+int xc_domain_destroy(xc_interface *xch,
+                      uint32_t domid)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_destroydomain;
+    domctl.domain = domid;
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_shutdown(xc_interface *xch,
+                       uint32_t domid,
+                       int reason)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BUFFER(sched_remote_shutdown_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+    {
+        PERROR("Could not allocate memory for xc_domain_shutdown hypercall");
+        goto out1;
+    }
+
+    arg->domain_id = domid;
+    arg->reason = reason;
+    ret = xencall2(xch->xcall, __HYPERVISOR_sched_op,
+                   SCHEDOP_remote_shutdown,
+                   HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(xch, arg);
+
+ out1:
+    return ret;
+}
+
+
+int xc_domain_node_setaffinity(xc_interface *xch,
+                               uint32_t domid,
+                               xc_nodemap_t nodemap)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+    int ret = -1;
+    int nodesize;
+
+    nodesize = xc_get_nodemap_size(xch);
+    if (nodesize <= 0)
+    {
+        PERROR("Could not get number of nodes");
+        goto out;
+    }
+
+    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
+    if ( local == NULL )
+    {
+        PERROR("Could not allocate memory for setnodeaffinity domctl hypercall");
+        goto out;
+    }
+
+    domctl.cmd = XEN_DOMCTL_setnodeaffinity;
+    domctl.domain = domid;
+
+    memcpy(local, nodemap, nodesize);
+    set_xen_guest_handle(domctl.u.nodeaffinity.nodemap.bitmap, local);
+    domctl.u.nodeaffinity.nodemap.nr_bits = nodesize * 8;
+
+    ret = do_domctl(xch, &domctl);
+
+    xc_hypercall_buffer_free(xch, local);
+
+ out:
+    return ret;
+}
+
+int xc_domain_node_getaffinity(xc_interface *xch,
+                               uint32_t domid,
+                               xc_nodemap_t nodemap)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+    int ret = -1;
+    int nodesize;
+
+    nodesize = xc_get_nodemap_size(xch);
+    if (nodesize <= 0)
+    {
+        PERROR("Could not get number of nodes");
+        goto out;
+    }
+
+    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
+    if ( local == NULL )
+    {
+        PERROR("Could not allocate memory for getnodeaffinity domctl hypercall");
+        goto out;
+    }
+
+    domctl.cmd = XEN_DOMCTL_getnodeaffinity;
+    domctl.domain = domid;
+
+    set_xen_guest_handle(domctl.u.nodeaffinity.nodemap.bitmap, local);
+    domctl.u.nodeaffinity.nodemap.nr_bits = nodesize * 8;
+
+    ret = do_domctl(xch, &domctl);
+
+    memcpy(nodemap, local, nodesize);
+
+    xc_hypercall_buffer_free(xch, local);
+
+ out:
+    return ret;
+}
+
+int xc_vcpu_setaffinity(xc_interface *xch,
+                        uint32_t domid,
+                        int vcpu,
+                        xc_cpumap_t cpumap_hard_inout,
+                        xc_cpumap_t cpumap_soft_inout,
+                        uint32_t flags)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(cpumap_hard_inout, 0,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(cpumap_soft_inout, 0,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    int ret = -1;
+    int cpusize;
+
+    cpusize = xc_get_cpumap_size(xch);
+    if (cpusize <= 0)
+    {
+        PERROR("Could not get number of cpus");
+        return -1;
+    }
+
+    HYPERCALL_BOUNCE_SET_SIZE(cpumap_hard_inout, cpusize);
+    HYPERCALL_BOUNCE_SET_SIZE(cpumap_soft_inout, cpusize);
+
+    if ( xc_hypercall_bounce_pre(xch, cpumap_hard_inout) ||
+         xc_hypercall_bounce_pre(xch, cpumap_soft_inout) )
+    {
+        PERROR("Could not allocate hcall buffers for DOMCTL_setvcpuaffinity");
+        goto out;
+    }
+
+    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
+    domctl.domain = domid;
+    domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.flags = flags;
+
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap,
+                         cpumap_hard_inout);
+    domctl.u.vcpuaffinity.cpumap_hard.nr_bits = cpusize * 8;
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_soft.bitmap,
+                         cpumap_soft_inout);
+    domctl.u.vcpuaffinity.cpumap_soft.nr_bits = cpusize * 8;
+
+    ret = do_domctl(xch, &domctl);
+
+ out:
+    xc_hypercall_bounce_post(xch, cpumap_hard_inout);
+    xc_hypercall_bounce_post(xch, cpumap_soft_inout);
+
+    return ret;
+}
+
+
+int xc_vcpu_getaffinity(xc_interface *xch,
+                        uint32_t domid,
+                        int vcpu,
+                        xc_cpumap_t cpumap_hard,
+                        xc_cpumap_t cpumap_soft,
+                        uint32_t flags)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(cpumap_hard, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(cpumap_soft, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    int ret = -1;
+    int cpusize;
+
+    cpusize = xc_get_cpumap_size(xch);
+    if (cpusize <= 0)
+    {
+        PERROR("Could not get number of cpus");
+        return -1;
+    }
+
+    HYPERCALL_BOUNCE_SET_SIZE(cpumap_hard, cpusize);
+    HYPERCALL_BOUNCE_SET_SIZE(cpumap_soft, cpusize);
+
+    if ( xc_hypercall_bounce_pre(xch, cpumap_hard) ||
+         xc_hypercall_bounce_pre(xch, cpumap_soft) )
+    {
+        PERROR("Could not allocate hcall buffers for DOMCTL_getvcpuaffinity");
+        goto out;
+    }
+
+    domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
+    domctl.domain = domid;
+    domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.flags = flags;
+
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap,
+                         cpumap_hard);
+    domctl.u.vcpuaffinity.cpumap_hard.nr_bits = cpusize * 8;
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_soft.bitmap,
+                         cpumap_soft);
+    domctl.u.vcpuaffinity.cpumap_soft.nr_bits = cpusize * 8;
+
+    ret = do_domctl(xch, &domctl);
+
+ out:
+    xc_hypercall_bounce_post(xch, cpumap_hard);
+    xc_hypercall_bounce_post(xch, cpumap_soft);
+
+    return ret;
+}
+
+int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
+                              unsigned int *guest_width)
+{
+    DECLARE_DOMCTL;
+
+    memset(&domctl, 0, sizeof(domctl));
+    domctl.domain = domid;
+    domctl.cmd = XEN_DOMCTL_get_address_size;
+
+    if ( do_domctl(xch, &domctl) != 0 )
+        return 1;
+
+    /* We want the result in bytes */
+    *guest_width = domctl.u.address_size.size / 8;
+    return 0;
+}
+
+int xc_dom_vuart_init(xc_interface *xch,
+                      uint32_t type,
+                      uint32_t domid,
+                      uint32_t console_domid,
+                      xen_pfn_t gfn,
+                      evtchn_port_t *evtchn)
+{
+    DECLARE_DOMCTL;
+    int rc = 0;
+
+    memset(&domctl, 0, sizeof(domctl));
+
+    domctl.cmd = XEN_DOMCTL_vuart_op;
+    domctl.domain = domid;
+    domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_INIT;
+    domctl.u.vuart_op.type = type;
+    domctl.u.vuart_op.console_domid = console_domid;
+    domctl.u.vuart_op.gfn = gfn;
+
+    if ( (rc = do_domctl(xch, &domctl)) < 0 )
+        return rc;
+
+    *evtchn = domctl.u.vuart_op.evtchn;
+
+    return rc;
+}
+
+int xc_domain_getinfo(xc_interface *xch,
+                      uint32_t first_domid,
+                      unsigned int max_doms,
+                      xc_dominfo_t *info)
+{
+    unsigned int nr_doms;
+    uint32_t next_domid = first_domid;
+    DECLARE_DOMCTL;
+    int rc = 0;
+
+    memset(info, 0, max_doms*sizeof(xc_dominfo_t));
+
+    for ( nr_doms = 0; nr_doms < max_doms; nr_doms++ )
+    {
+        domctl.cmd = XEN_DOMCTL_getdomaininfo;
+        domctl.domain = next_domid;
+        if ( (rc = do_domctl(xch, &domctl)) < 0 )
+            break;
+        info->domid      = domctl.domain;
+
+        info->dying    = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_dying);
+        info->shutdown = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_shutdown);
+        info->paused   = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_paused);
+        info->blocked  = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_blocked);
+        info->running  = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_running);
+        info->hvm      = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
+        info->debugged = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_debugged);
+        info->xenstore = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_xs_domain);
+        info->hap      = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hap);
+
+        info->shutdown_reason =
+            (domctl.u.getdomaininfo.flags>>XEN_DOMINF_shutdownshift) &
+            XEN_DOMINF_shutdownmask;
+
+        if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_crash) )
+        {
+            info->shutdown = 0;
+            info->crashed  = 1;
+        }
+
+        info->ssidref  = domctl.u.getdomaininfo.ssidref;
+        info->nr_pages = domctl.u.getdomaininfo.tot_pages;
+        info->nr_outstanding_pages = domctl.u.getdomaininfo.outstanding_pages;
+        info->nr_shared_pages = domctl.u.getdomaininfo.shr_pages;
+        info->nr_paged_pages = domctl.u.getdomaininfo.paged_pages;
+        info->max_memkb = domctl.u.getdomaininfo.max_pages << (PAGE_SHIFT-10);
+        info->shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
+        info->cpu_time = domctl.u.getdomaininfo.cpu_time;
+        info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
+        info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+        info->cpupool = domctl.u.getdomaininfo.cpupool;
+        info->arch_config = domctl.u.getdomaininfo.arch_config;
+
+        memcpy(info->handle, domctl.u.getdomaininfo.handle,
+               sizeof(xen_domain_handle_t));
+
+        next_domid = (uint16_t)domctl.domain + 1;
+        info++;
+    }
+
+    if ( nr_doms == 0 )
+        return rc;
+
+    return nr_doms;
+}
+
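/*
 * Illustrative caller-side sketch only: enumerate up to 64 domains with
 * xc_domain_getinfo() above and log their sizes.  The helper name and the
 * 64-domain cap are arbitrary choices of this sketch.
 */
static int sketch_list_domains(xc_interface *xch)
{
    xc_dominfo_t info[64];
    int i, nr = xc_domain_getinfo(xch, 0, 64, info);

    for ( i = 0; i < nr; i++ )
        DPRINTF("dom%u: %lu pages\n", info[i].domid, info[i].nr_pages);

    return nr < 0 ? -1 : 0;
}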
+int xc_domain_getinfolist(xc_interface *xch,
+                          uint32_t first_domain,
+                          unsigned int max_domains,
+                          xc_domaininfo_t *info)
+{
+    int ret = 0;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(info, max_domains*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, info) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
+    sysctl.u.getdomaininfolist.first_domain = first_domain;
+    sysctl.u.getdomaininfolist.max_domains  = max_domains;
+    set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);
+
+    if ( xc_sysctl(xch, &sysctl) < 0 )
+        ret = -1;
+    else
+        ret = sysctl.u.getdomaininfolist.num_domains;
+
+    xc_hypercall_bounce_post(xch, info);
+
+    return ret;
+}
+
+/* set broken page p2m */
+int xc_set_broken_page_p2m(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned long pfn)
+{
+    int ret;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_set_broken_page_p2m;
+    domctl.domain = domid;
+    domctl.u.set_broken_page_p2m.pfn = pfn;
+    ret = do_domctl(xch, &domctl);
+
+    return ret ? -1 : 0;
+}
+
+/* get info from hvm guest for save */
+int xc_domain_hvm_getcontext(xc_interface *xch,
+                             uint32_t domid,
+                             uint8_t *ctxt_buf,
+                             uint32_t size)
+{
+    int ret;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_gethvmcontext;
+    domctl.domain = domid;
+    domctl.u.hvmcontext.size = size;
+    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
+
+    ret = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, ctxt_buf);
+
+    return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
+}
+
+/* Get just one element of the HVM guest context.
+ * size must be >= HVM_SAVE_LENGTH(type) */
+int xc_domain_hvm_getcontext_partial(xc_interface *xch,
+                                     uint32_t domid,
+                                     uint16_t typecode,
+                                     uint16_t instance,
+                                     void *ctxt_buf,
+                                     uint32_t size)
+{
+    int ret;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( !ctxt_buf || xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_gethvmcontext_partial;
+    domctl.domain = domid;
+    domctl.u.hvmcontext_partial.type = typecode;
+    domctl.u.hvmcontext_partial.instance = instance;
+    domctl.u.hvmcontext_partial.bufsz = size;
+    set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
+
+    ret = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, ctxt_buf);
+
+    return ret ? -1 : 0;
+}
+
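/*
 * Illustrative sketch only (x86 HVM guests): fetch one vCPU's register
 * block with xc_domain_hvm_getcontext_partial() above, assuming the
 * HVM_SAVE_CODE() macro and struct hvm_hw_cpu from the public
 * xen/hvm/save.h headers.
 */
static int sketch_get_vcpu_regs(xc_interface *xch, uint32_t domid,
                                uint16_t vcpu, struct hvm_hw_cpu *regs)
{
    return xc_domain_hvm_getcontext_partial(xch, domid, HVM_SAVE_CODE(CPU),
                                            vcpu, regs, sizeof(*regs));
}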
+/* set info to hvm guest for restore */
+int xc_domain_hvm_setcontext(xc_interface *xch,
+                             uint32_t domid,
+                             uint8_t *ctxt_buf,
+                             uint32_t size)
+{
+    int ret;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_sethvmcontext;
+    domctl.domain = domid;
+    domctl.u.hvmcontext.size = size;
+    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
+
+    ret = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, ctxt_buf);
+
+    return ret;
+}
+
+int xc_vcpu_getcontext(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t vcpu,
+                       vcpu_guest_context_any_t *ctxt)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_getvcpucontext;
+    domctl.domain = domid;
+    domctl.u.vcpucontext.vcpu   = (uint16_t)vcpu;
+    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
+
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, ctxt);
+
+    return rc;
+}
+
+int xc_vcpu_get_extstate(xc_interface *xch,
+                         uint32_t domid,
+                         uint32_t vcpu,
+                         xc_vcpu_extstate_t *extstate)
+{
+    int rc = -ENODEV;
+#if defined (__i386__) || defined(__x86_64__)
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(void, buffer);
+    bool get_state;
+
+    if ( !extstate )
+        return -EINVAL;
+
+    domctl.cmd = XEN_DOMCTL_getvcpuextstate;
+    domctl.domain = domid;
+    domctl.u.vcpuextstate.vcpu = (uint16_t)vcpu;
+    domctl.u.vcpuextstate.xfeature_mask = extstate->xfeature_mask;
+    domctl.u.vcpuextstate.size = extstate->size;
+
+    get_state = (extstate->size != 0);
+
+    if ( get_state )
+    {
+        buffer = xc_hypercall_buffer_alloc(xch, buffer, extstate->size);
+
+        if ( !buffer )
+        {
+            PERROR("Unable to allocate memory for vcpu%u's xsave context",
+                   vcpu);
+            rc = -ENOMEM;
+            goto out;
+        }
+
+        set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
+    }
+
+    rc = do_domctl(xch, &domctl);
+
+    if ( rc )
+        goto out;
+
+    /* A query for the size of buffer to use. */
+    if ( !extstate->size && !extstate->xfeature_mask )
+    {
+        extstate->xfeature_mask = domctl.u.vcpuextstate.xfeature_mask;
+        extstate->size = domctl.u.vcpuextstate.size;
+        goto out;
+    }
+
+    if ( get_state )
+        memcpy(extstate->buffer, buffer, extstate->size);
+
+out:
+    if ( get_state )
+        xc_hypercall_buffer_free(xch, buffer);
+#endif
+
+    return rc;
+}
+
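/*
 * Illustrative sketch only: the usual two-call pattern for
 * xc_vcpu_get_extstate() above - query the required size first, then
 * fetch the xsave image.  The malloc()ed buffer is an assumption of
 * this sketch.
 */
static int sketch_get_xsave(xc_interface *xch, uint32_t domid, uint32_t vcpu)
{
    xc_vcpu_extstate_t ext = { .xfeature_mask = 0, .size = 0, .buffer = NULL };

    if ( xc_vcpu_get_extstate(xch, domid, vcpu, &ext) )
        return -1;

    ext.buffer = malloc(ext.size);
    if ( !ext.buffer )
        return -1;

    if ( xc_vcpu_get_extstate(xch, domid, vcpu, &ext) )
    {
        free(ext.buffer);
        return -1;
    }

    /* ... consume ext.buffer / ext.size here ... */
    free(ext.buffer);
    return 0;
}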
+int xc_watchdog(xc_interface *xch,
+                uint32_t id,
+                uint32_t timeout)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BUFFER(sched_watchdog_t, arg);
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+    {
+        PERROR("Could not allocate memory for xc_watchdog hypercall");
+        goto out1;
+    }
+
+    arg->id = id;
+    arg->timeout = timeout;
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_sched_op,
+                   SCHEDOP_watchdog,
+                   HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_buffer_free(xch, arg);
+
+ out1:
+    return ret;
+}
+
+
+int xc_shadow_control(xc_interface *xch,
+                      uint32_t domid,
+                      unsigned int sop,
+                      xc_hypercall_buffer_t *dirty_bitmap,
+                      unsigned long pages,
+                      unsigned long *mb,
+                      uint32_t mode,
+                      xc_shadow_op_stats_t *stats)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(dirty_bitmap);
+
+    memset(&domctl, 0, sizeof(domctl));
+
+    domctl.cmd = XEN_DOMCTL_shadow_op;
+    domctl.domain = domid;
+    domctl.u.shadow_op.op     = sop;
+    domctl.u.shadow_op.pages  = pages;
+    domctl.u.shadow_op.mb     = mb ? *mb : 0;
+    domctl.u.shadow_op.mode   = mode;
+    if (dirty_bitmap != NULL)
+        set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap,
+                                dirty_bitmap);
+
+    rc = do_domctl(xch, &domctl);
+
+    if ( stats )
+        memcpy(stats, &domctl.u.shadow_op.stats,
+               sizeof(xc_shadow_op_stats_t));
+
+    if ( mb )
+        *mb = domctl.u.shadow_op.mb;
+
+    return (rc == 0) ? domctl.u.shadow_op.pages : rc;
+}
+
+int xc_domain_setmaxmem(xc_interface *xch,
+                        uint32_t domid,
+                        uint64_t max_memkb)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_max_mem;
+    domctl.domain = domid;
+    domctl.u.max_mem.max_memkb = max_memkb;
+    return do_domctl(xch, &domctl);
+}
+
+#if defined(__i386__) || defined(__x86_64__)
+int xc_domain_set_memory_map(xc_interface *xch,
+                               uint32_t domid,
+                               struct e820entry entries[],
+                               uint32_t nr_entries)
+{
+    int rc;
+    struct xen_foreign_memory_map fmap = {
+        .domid = domid,
+        .map = { .nr_entries = nr_entries }
+    };
+    DECLARE_HYPERCALL_BOUNCE(entries, nr_entries * sizeof(struct e820entry),
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( !entries || xc_hypercall_bounce_pre(xch, entries) )
+        return -1;
+
+    set_xen_guest_handle(fmap.map.buffer, entries);
+
+    rc = do_memory_op(xch, XENMEM_set_memory_map, &fmap, sizeof(fmap));
+
+    xc_hypercall_bounce_post(xch, entries);
+
+    return rc;
+}
+
+int xc_get_machine_memory_map(xc_interface *xch,
+                              struct e820entry entries[],
+                              uint32_t max_entries)
+{
+    int rc;
+    struct xen_memory_map memmap = {
+        .nr_entries = max_entries
+    };
+    DECLARE_HYPERCALL_BOUNCE(entries, sizeof(struct e820entry) * max_entries,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( !entries || xc_hypercall_bounce_pre(xch, entries) || max_entries <= 1)
+        return -1;
+
+
+    set_xen_guest_handle(memmap.buffer, entries);
+
+    rc = do_memory_op(xch, XENMEM_machine_memory_map, &memmap, sizeof(memmap));
+
+    xc_hypercall_bounce_post(xch, entries);
+
+    return rc ? rc : memmap.nr_entries;
+}
+
+int xc_domain_set_memmap_limit(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long map_limitkb)
+{
+    struct e820entry e820;
+
+    e820.addr = 0;
+    e820.size = (uint64_t)map_limitkb << 10;
+    e820.type = E820_RAM;
+
+    return xc_domain_set_memory_map(xch, domid, &e820, 1);
+}
+#else
+int xc_domain_set_memmap_limit(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long map_limitkb)
+{
+    PERROR("Function not implemented");
+    errno = ENOSYS;
+    return -1;
+}
+#endif
+
+int xc_reserved_device_memory_map(xc_interface *xch,
+                                  uint32_t flags,
+                                  uint16_t seg,
+                                  uint8_t bus,
+                                  uint8_t devfn,
+                                  struct xen_reserved_device_memory entries[],
+                                  uint32_t *max_entries)
+{
+    int rc;
+    struct xen_reserved_device_memory_map xrdmmap = {
+        .flags = flags,
+        .dev.pci.seg = seg,
+        .dev.pci.bus = bus,
+        .dev.pci.devfn = devfn,
+        .nr_entries = *max_entries
+    };
+    DECLARE_HYPERCALL_BOUNCE(entries,
+                             sizeof(struct xen_reserved_device_memory) *
+                             *max_entries, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, entries) )
+        return -1;
+
+    set_xen_guest_handle(xrdmmap.buffer, entries);
+
+    rc = do_memory_op(xch, XENMEM_reserved_device_memory_map,
+                      &xrdmmap, sizeof(xrdmmap));
+
+    xc_hypercall_bounce_post(xch, entries);
+
+    *max_entries = xrdmmap.nr_entries;
+
+    return rc;
+}
+
+int xc_domain_set_time_offset(xc_interface *xch,
+                              uint32_t domid,
+                              int32_t time_offset_seconds)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_settimeoffset;
+    domctl.domain = domid;
+    domctl.u.settimeoffset.time_offset_seconds = time_offset_seconds;
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_disable_migrate(xc_interface *xch, uint32_t domid)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_disable_migrate;
+    domctl.domain = domid;
+    domctl.u.disable_migrate.disable = 1;
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_set_tsc_info(xc_interface *xch,
+                           uint32_t domid,
+                           uint32_t tsc_mode,
+                           uint64_t elapsed_nsec,
+                           uint32_t gtsc_khz,
+                           uint32_t incarnation)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_settscinfo;
+    domctl.domain = domid;
+    domctl.u.tsc_info.tsc_mode = tsc_mode;
+    domctl.u.tsc_info.elapsed_nsec = elapsed_nsec;
+    domctl.u.tsc_info.gtsc_khz = gtsc_khz;
+    domctl.u.tsc_info.incarnation = incarnation;
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_get_tsc_info(xc_interface *xch,
+                           uint32_t domid,
+                           uint32_t *tsc_mode,
+                           uint64_t *elapsed_nsec,
+                           uint32_t *gtsc_khz,
+                           uint32_t *incarnation)
+{
+    int rc;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_gettscinfo;
+    domctl.domain = domid;
+    rc = do_domctl(xch, &domctl);
+    if ( rc == 0 )
+    {
+        *tsc_mode = domctl.u.tsc_info.tsc_mode;
+        *elapsed_nsec = domctl.u.tsc_info.elapsed_nsec;
+        *gtsc_khz = domctl.u.tsc_info.gtsc_khz;
+        *incarnation = domctl.u.tsc_info.incarnation;
+    }
+    return rc;
+}
+
+
+int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
+{
+    long rc = do_memory_op(xch, XENMEM_maximum_gpfn, &domid, sizeof(domid));
+
+    if ( rc >= 0 )
+    {
+        *gpfns = rc;
+        rc = 0;
+    }
+    return rc;
+}
+
+int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
+{
+    int rc = xc_domain_maximum_gpfn(xch, domid, gpfns);
+
+    if ( rc >= 0 )
+        *gpfns += 1;
+
+    return rc;
+}
+
+int xc_domain_increase_reservation(xc_interface *xch,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   unsigned int mem_flags,
+                                   xen_pfn_t *extent_start)
+{
+    int err;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    struct xen_memory_reservation reservation = {
+        .nr_extents   = nr_extents,
+        .extent_order = extent_order,
+        .mem_flags    = mem_flags,
+        .domid        = domid
+    };
+
+    /* may be NULL */
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
+    {
+        PERROR("Could not bounce memory for XENMEM_increase_reservation hypercall");
+        return -1;
+    }
+
+    set_xen_guest_handle(reservation.extent_start, extent_start);
+
+    err = do_memory_op(xch, XENMEM_increase_reservation, &reservation, sizeof(reservation));
+
+    xc_hypercall_bounce_post(xch, extent_start);
+
+    return err;
+}
+
+int xc_domain_increase_reservation_exact(xc_interface *xch,
+                                         uint32_t domid,
+                                         unsigned long nr_extents,
+                                         unsigned int extent_order,
+                                         unsigned int mem_flags,
+                                         xen_pfn_t *extent_start)
+{
+    int err;
+
+    err = xc_domain_increase_reservation(xch, domid, nr_extents,
+                                         extent_order, mem_flags, extent_start);
+
+    if ( err == nr_extents )
+        return 0;
+
+    if ( err >= 0 )
+    {
+        DPRINTF("Failed allocation for dom %d: "
+                "%ld extents of order %d, mem_flags %x\n",
+                domid, nr_extents, extent_order, mem_flags);
+        errno = ENOMEM;
+        err = -1;
+    }
+
+    return err;
+}
+
+int xc_domain_decrease_reservation(xc_interface *xch,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   xen_pfn_t *extent_start)
+{
+    int err;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    struct xen_memory_reservation reservation = {
+        .nr_extents   = nr_extents,
+        .extent_order = extent_order,
+        .mem_flags    = 0,
+        .domid        = domid
+    };
+
+    if ( extent_start == NULL )
+    {
+        DPRINTF("decrease_reservation extent_start is NULL!\n");
+        errno = EINVAL;
+        return -1;
+    }
+
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
+    {
+        PERROR("Could not bounce memory for XENMEM_decrease_reservation hypercall");
+        return -1;
+    }
+    set_xen_guest_handle(reservation.extent_start, extent_start);
+
+    err = do_memory_op(xch, XENMEM_decrease_reservation, &reservation, sizeof(reservation));
+
+    xc_hypercall_bounce_post(xch, extent_start);
+
+    return err;
+}
+
+int xc_domain_decrease_reservation_exact(xc_interface *xch,
+                                         uint32_t domid,
+                                         unsigned long nr_extents,
+                                         unsigned int extent_order,
+                                         xen_pfn_t *extent_start)
+{
+    int err;
+
+    err = xc_domain_decrease_reservation(xch, domid, nr_extents,
+                                         extent_order, extent_start);
+
+    if ( err == nr_extents )
+        return 0;
+
+    if ( err >= 0 )
+    {
+        DPRINTF("Failed deallocation for dom %d: %ld extents of order %d\n",
+                domid, nr_extents, extent_order);
+        errno = EINVAL;
+        err = -1;
+    }
+
+    return err;
+}
+
+int xc_domain_add_to_physmap(xc_interface *xch,
+                             uint32_t domid,
+                             unsigned int space,
+                             unsigned long idx,
+                             xen_pfn_t gpfn)
+{
+    struct xen_add_to_physmap xatp = {
+        .domid = domid,
+        .space = space,
+        .idx = idx,
+        .gpfn = gpfn,
+    };
+    return do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
+}
+
+int xc_domain_add_to_physmap_batch(xc_interface *xch,
+                                   uint32_t domid,
+                                   uint32_t foreign_domid,
+                                   unsigned int space,
+                                   unsigned int size,
+                                   xen_ulong_t *idxs,
+                                   xen_pfn_t *gpfns,
+                                   int *errs)
+{
+    int rc;
+    DECLARE_HYPERCALL_BOUNCE(idxs, size * sizeof(*idxs), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(gpfns, size * sizeof(*gpfns), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(errs, size * sizeof(*errs), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    struct xen_add_to_physmap_batch xatp_batch = {
+        .domid = domid,
+        .space = space,
+        .size = size,
+        .u = { .foreign_domid = foreign_domid }
+    };
+
+    if ( xc_hypercall_bounce_pre(xch, idxs)  ||
+         xc_hypercall_bounce_pre(xch, gpfns) ||
+         xc_hypercall_bounce_pre(xch, errs)  )
+    {
+        PERROR("Could not bounce memory for XENMEM_add_to_physmap_batch");
+        rc = -1;
+        goto out;
+    }
+
+    set_xen_guest_handle(xatp_batch.idxs, idxs);
+    set_xen_guest_handle(xatp_batch.gpfns, gpfns);
+    set_xen_guest_handle(xatp_batch.errs, errs);
+
+    rc = do_memory_op(xch, XENMEM_add_to_physmap_batch,
+                      &xatp_batch, sizeof(xatp_batch));
+
+out:
+    xc_hypercall_bounce_post(xch, idxs);
+    xc_hypercall_bounce_post(xch, gpfns);
+    xc_hypercall_bounce_post(xch, errs);
+
+    return rc;
+}
+
+int xc_domain_remove_from_physmap(xc_interface *xch,
+                                  uint32_t domid,
+                                  xen_pfn_t gpfn)
+{
+    struct xen_remove_from_physmap xrfp = {
+        .domid = domid,
+        .gpfn = gpfn,
+    };
+    return do_memory_op(xch, XENMEM_remove_from_physmap, &xrfp, sizeof(xrfp));
+}
+
+int xc_domain_claim_pages(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long nr_pages)
+{
+    int err;
+    struct xen_memory_reservation reservation = {
+        .nr_extents   = nr_pages,
+        .extent_order = 0,
+        .mem_flags    = 0, /* no flags */
+        .domid        = domid
+    };
+
+    set_xen_guest_handle(reservation.extent_start, HYPERCALL_BUFFER_NULL);
+
+    err = do_memory_op(xch, XENMEM_claim_pages, &reservation, sizeof(reservation));
+    /* Ignore it if the hypervisor does not support the call. */
+    if (err == -1 && errno == ENOSYS)
+        err = errno = 0;
+    return err;
+}
+
+int xc_domain_populate_physmap(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long nr_extents,
+                               unsigned int extent_order,
+                               unsigned int mem_flags,
+                               xen_pfn_t *extent_start)
+{
+    int err;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    struct xen_memory_reservation reservation = {
+        .nr_extents   = nr_extents,
+        .extent_order = extent_order,
+        .mem_flags    = mem_flags,
+        .domid        = domid
+    };
+
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
+    {
+        PERROR("Could not bounce memory for XENMEM_populate_physmap hypercall");
+        return -1;
+    }
+    set_xen_guest_handle(reservation.extent_start, extent_start);
+
+    err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
+
+    xc_hypercall_bounce_post(xch, extent_start);
+    return err;
+}
+
+int xc_domain_populate_physmap_exact(xc_interface *xch,
+                                     uint32_t domid,
+                                     unsigned long nr_extents,
+                                     unsigned int extent_order,
+                                     unsigned int mem_flags,
+                                     xen_pfn_t *extent_start)
+{
+    int err;
+
+    err = xc_domain_populate_physmap(xch, domid, nr_extents,
+                                     extent_order, mem_flags, extent_start);
+    if ( err == nr_extents )
+        return 0;
+
+    if ( err >= 0 )
+    {
+        DPRINTF("Failed allocation for dom %d: %ld extents of order %d\n",
+                domid, nr_extents, extent_order);
+        errno = EBUSY;
+        err = -1;
+    }
+
+    return err;
+}
+
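/*
 * Illustrative sketch only: back a single guest frame with memory via
 * xc_domain_populate_physmap_exact() above; the gfn value is a placeholder.
 */
static int sketch_populate_one_frame(xc_interface *xch, uint32_t domid)
{
    xen_pfn_t gfn = 0xfefff;

    return xc_domain_populate_physmap_exact(xch, domid, 1 /* extent */,
                                            0 /* order */, 0 /* flags */,
                                            &gfn);
}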
+int xc_domain_memory_exchange_pages(xc_interface *xch,
+                                    uint32_t domid,
+                                    unsigned long nr_in_extents,
+                                    unsigned int in_order,
+                                    xen_pfn_t *in_extents,
+                                    unsigned long nr_out_extents,
+                                    unsigned int out_order,
+                                    xen_pfn_t *out_extents)
+{
+    int rc = -1;
+    DECLARE_HYPERCALL_BOUNCE(in_extents, nr_in_extents*sizeof(*in_extents), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(out_extents, nr_out_extents*sizeof(*out_extents), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    struct xen_memory_exchange exchange = {
+        .in = {
+            .nr_extents   = nr_in_extents,
+            .extent_order = in_order,
+            .domid        = domid
+        },
+        .out = {
+            .nr_extents   = nr_out_extents,
+            .extent_order = out_order,
+            .domid        = domid
+        }
+    };
+
+    if ( xc_hypercall_bounce_pre(xch, in_extents) ||
+         xc_hypercall_bounce_pre(xch, out_extents))
+        goto out;
+
+    set_xen_guest_handle(exchange.in.extent_start, in_extents);
+    set_xen_guest_handle(exchange.out.extent_start, out_extents);
+
+    rc = do_memory_op(xch, XENMEM_exchange, &exchange, sizeof(exchange));
+
+out:
+    xc_hypercall_bounce_post(xch, in_extents);
+    xc_hypercall_bounce_post(xch, out_extents);
+
+    return rc;
+}
+
+/* Currently only implemented on x86.  Because of the broken error
+ * reporting style this cannot be handled in the caller (e.g. by
+ * checking for errno == ENOSYS).  Once that is fixed, this #ifdef can
+ * be removed.
+ */
+#if defined(__i386__)||defined(__x86_64__)
+static int xc_domain_pod_target(xc_interface *xch,
+                                int op,
+                                uint32_t domid,
+                                uint64_t target_pages,
+                                uint64_t *tot_pages,
+                                uint64_t *pod_cache_pages,
+                                uint64_t *pod_entries)
+{
+    int err;
+
+    struct xen_pod_target pod_target = {
+        .domid = domid,
+        .target_pages = target_pages
+    };
+
+    err = do_memory_op(xch, op, &pod_target, sizeof(pod_target));
+
+    if ( err < 0 )
+    {
+        DPRINTF("Failed %s_pod_target dom %d\n",
+                (op==XENMEM_set_pod_target)?"set":"get",
+                domid);
+        errno = -err;
+        err = -1;
+    }
+    else
+        err = 0;
+
+    if ( tot_pages )
+        *tot_pages = pod_target.tot_pages;
+    if ( pod_cache_pages )
+        *pod_cache_pages = pod_target.pod_cache_pages;
+    if ( pod_entries )
+        *pod_entries = pod_target.pod_entries;
+
+    return err;
+}
+
+
+int xc_domain_set_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t target_pages,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries)
+{
+    return xc_domain_pod_target(xch,
+                                XENMEM_set_pod_target,
+                                domid,
+                                target_pages,
+                                tot_pages,
+                                pod_cache_pages,
+                                pod_entries);
+}
+
+int xc_domain_get_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries)
+{
+    return xc_domain_pod_target(xch,
+                                XENMEM_get_pod_target,
+                                domid,
+                                -1,
+                                tot_pages,
+                                pod_cache_pages,
+                                pod_entries);
+}
+#else
+int xc_domain_set_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t target_pages,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries)
+{
+    return 0;
+}
+int xc_domain_get_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries)
+{
+    /* On x86 (above) xc_domain_pod_target will incorrectly return -1
+     * with errno==-1 on error. Do the same for least surprise. */
+    errno = -1;
+    return -1;
+}
+#endif
+
+int xc_domain_max_vcpus(xc_interface *xch, uint32_t domid, unsigned int max)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_max_vcpus;
+    domctl.domain = domid;
+    domctl.u.max_vcpus.max    = max;
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
+                        xen_domain_handle_t handle)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_setdomainhandle;
+    domctl.domain = domid;
+    memcpy(domctl.u.setdomainhandle.handle, handle,
+           sizeof(xen_domain_handle_t));
+    return do_domctl(xch, &domctl);
+}
+
+int xc_vcpu_getinfo(xc_interface *xch,
+                    uint32_t domid,
+                    uint32_t vcpu,
+                    xc_vcpuinfo_t *info)
+{
+    int rc;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
+    domctl.domain = domid;
+    domctl.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
+
+    rc = do_domctl(xch, &domctl);
+
+    memcpy(info, &domctl.u.getvcpuinfo, sizeof(*info));
+
+    return rc;
+}
+
+int xc_domain_ioport_permission(xc_interface *xch,
+                                uint32_t domid,
+                                uint32_t first_port,
+                                uint32_t nr_ports,
+                                uint32_t allow_access)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_ioport_permission;
+    domctl.domain = domid;
+    domctl.u.ioport_permission.first_port = first_port;
+    domctl.u.ioport_permission.nr_ports = nr_ports;
+    domctl.u.ioport_permission.allow_access = allow_access;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_availheap(xc_interface *xch,
+                 int min_width,
+                 int max_width,
+                 int node,
+                 uint64_t *bytes)
+{
+    DECLARE_SYSCTL;
+    int rc;
+
+    sysctl.cmd = XEN_SYSCTL_availheap;
+    sysctl.u.availheap.min_bitwidth = min_width;
+    sysctl.u.availheap.max_bitwidth = max_width;
+    sysctl.u.availheap.node = node;
+
+    rc = xc_sysctl(xch, &sysctl);
+
+    *bytes = sysctl.u.availheap.avail_bytes;
+
+    return rc;
+}
+
+int xc_vcpu_setcontext(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t vcpu,
+                       vcpu_guest_context_any_t *ctxt)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    int rc;
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_setvcpucontext;
+    domctl.domain = domid;
+    domctl.u.vcpucontext.vcpu = vcpu;
+    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
+
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, ctxt);
+
+    return rc;
+}
+
+int xc_domain_irq_permission(xc_interface *xch,
+                             uint32_t domid,
+                             uint8_t pirq,
+                             uint8_t allow_access)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_irq_permission;
+    domctl.domain = domid;
+    domctl.u.irq_permission.pirq = pirq;
+    domctl.u.irq_permission.allow_access = allow_access;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_iomem_permission(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long first_mfn,
+                               unsigned long nr_mfns,
+                               uint8_t allow_access)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_iomem_permission;
+    domctl.domain = domid;
+    domctl.u.iomem_permission.first_mfn = first_mfn;
+    domctl.u.iomem_permission.nr_mfns = nr_mfns;
+    domctl.u.iomem_permission.allow_access = allow_access;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_send_trigger(xc_interface *xch,
+                           uint32_t domid,
+                           uint32_t trigger,
+                           uint32_t vcpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_sendtrigger;
+    domctl.domain = domid;
+    domctl.u.sendtrigger.trigger = trigger;
+    domctl.u.sendtrigger.vcpu = vcpu;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value)
+{
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_param_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->domid = dom;
+    arg->index = param;
+    arg->value = value;
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op,
+                  HVMOP_set_param,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value)
+{
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_param_t, arg);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->domid = dom;
+    arg->index = param;
+    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op,
+                  HVMOP_get_param,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+    *value = arg->value;
+    xc_hypercall_buffer_free(handle, arg);
+    return rc;
+}
+
+int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value)
+{
+    return xc_hvm_param_set(handle, dom, param, value);
+}
+
+int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value)
+{
+    uint64_t v;
+    int ret;
+
+    ret = xc_hvm_param_get(handle, dom, param, &v);
+    if (ret < 0)
+        return ret;
+    *value = v;
+    return 0;
+}
+
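/*
 * Illustrative sketch only: read an HVM parameter through the preferred
 * 64-bit interface above, assuming the HVM_PARAM_STORE_PFN index from
 * the public xen/hvm/params.h header.
 */
static int sketch_get_store_pfn(xc_interface *xch, uint32_t domid,
                                uint64_t *store_pfn)
{
    return xc_hvm_param_get(xch, domid, HVM_PARAM_STORE_PFN, store_pfn);
}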
+int xc_domain_setdebugging(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned int enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_setdebugging;
+    domctl.domain = domid;
+    domctl.u.setdebugging.enable = enable;
+    return do_domctl(xch, &domctl);
+}
+
+int xc_assign_device(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t machine_sbdf,
+    uint32_t flags)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_assign_device;
+    domctl.domain = domid;
+    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_PCI;
+    domctl.u.assign_device.u.pci.machine_sbdf = machine_sbdf;
+    domctl.u.assign_device.flags = flags;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_get_device_group(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t machine_sbdf,
+    uint32_t max_sdevs,
+    uint32_t *num_sdevs,
+    uint32_t *sdev_array)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(sdev_array, max_sdevs * sizeof(*sdev_array), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, sdev_array) )
+    {
+        PERROR("Could not bounce buffer for xc_get_device_group");
+        return -1;
+    }
+
+    domctl.cmd = XEN_DOMCTL_get_device_group;
+    domctl.domain = domid;
+
+    domctl.u.get_device_group.machine_sbdf = machine_sbdf;
+    domctl.u.get_device_group.max_sdevs = max_sdevs;
+
+    set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);
+
+    rc = do_domctl(xch, &domctl);
+
+    *num_sdevs = domctl.u.get_device_group.num_sdevs;
+
+    xc_hypercall_bounce_post(xch, sdev_array);
+
+    return rc;
+}
+
+int xc_test_assign_device(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t machine_sbdf)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_test_assign_device;
+    domctl.domain = domid;
+    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_PCI;
+    domctl.u.assign_device.u.pci.machine_sbdf = machine_sbdf;
+    domctl.u.assign_device.flags = 0;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_deassign_device(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t machine_sbdf)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_deassign_device;
+    domctl.domain = domid;
+    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_PCI;
+    domctl.u.assign_device.u.pci.machine_sbdf = machine_sbdf;
+    domctl.u.assign_device.flags = 0;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_assign_dt_device(
+    xc_interface *xch,
+    uint32_t domid,
+    char *path)
+{
+    int rc;
+    size_t size = strlen(path);
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, path) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_assign_device;
+    domctl.domain = domid;
+
+    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
+    domctl.u.assign_device.u.dt.size = size;
+    /*
+     * DT devices have no RDM (reserved device memory), so none of the
+     * assignment flags apply; leave the field as 0.
+     */
+    domctl.u.assign_device.flags = 0;
+    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);
+
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, path);
+
+    return rc;
+}
+
+int xc_test_assign_dt_device(
+    xc_interface *xch,
+    uint32_t domid,
+    char *path)
+{
+    int rc;
+    size_t size = strlen(path);
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, path) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_test_assign_device;
+    domctl.domain = domid;
+
+    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
+    domctl.u.assign_device.u.dt.size = size;
+    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);
+    domctl.u.assign_device.flags = 0;
+
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, path);
+
+    return rc;
+}
+
+int xc_deassign_dt_device(
+    xc_interface *xch,
+    uint32_t domid,
+    char *path)
+{
+    int rc;
+    size_t size = strlen(path);
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, path) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_deassign_device;
+    domctl.domain = domid;
+
+    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
+    domctl.u.assign_device.u.dt.size = size;
+    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);
+    domctl.u.assign_device.flags = 0;
+
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, path);
+
+    return rc;
+}
+
+int xc_domain_update_msi_irq(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t gvec,
+    uint32_t pirq,
+    uint32_t gflags,
+    uint64_t gtable)
+{
+    int rc;
+    struct xen_domctl_bind_pt_irq *bind;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_bind_pt_irq;
+    domctl.domain = domid;
+
+    bind = &(domctl.u.bind_pt_irq);
+    bind->irq_type = PT_IRQ_TYPE_MSI;
+    bind->machine_irq = pirq;
+    bind->u.msi.gvec = gvec;
+    bind->u.msi.gflags = gflags;
+    bind->u.msi.gtable = gtable;
+
+    rc = do_domctl(xch, &domctl);
+    return rc;
+}
+
+int xc_domain_unbind_msi_irq(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t gvec,
+    uint32_t pirq,
+    uint32_t gflags)
+{
+    int rc;
+    struct xen_domctl_bind_pt_irq *bind;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
+    domctl.domain = domid;
+
+    bind = &(domctl.u.bind_pt_irq);
+    bind->irq_type = PT_IRQ_TYPE_MSI;
+    bind->machine_irq = pirq;
+    bind->u.msi.gvec = gvec;
+    bind->u.msi.gflags = gflags;
+
+    rc = do_domctl(xch, &domctl);
+    return rc;
+}
+
+/* Pass-through: bind a machine IRQ to a guest IRQ. */
+static int xc_domain_bind_pt_irq_int(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t machine_irq,
+    uint8_t irq_type,
+    uint8_t bus,
+    uint8_t device,
+    uint8_t intx,
+    uint8_t isa_irq,
+    uint16_t spi)
+{
+    int rc;
+    struct xen_domctl_bind_pt_irq *bind;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_bind_pt_irq;
+    domctl.domain = domid;
+
+    bind = &(domctl.u.bind_pt_irq);
+    bind->irq_type = irq_type;
+    bind->machine_irq = machine_irq;
+    switch ( irq_type )
+    {
+    case PT_IRQ_TYPE_PCI:
+    case PT_IRQ_TYPE_MSI_TRANSLATE:
+        bind->u.pci.bus = bus;
+        bind->u.pci.device = device;
+        bind->u.pci.intx = intx;
+        break;
+    case PT_IRQ_TYPE_ISA:
+        bind->u.isa.isa_irq = isa_irq;
+        break;
+    case PT_IRQ_TYPE_SPI:
+        bind->u.spi.spi = spi;
+        break;
+    default:
+        errno = EINVAL;
+        return -1;
+    }
+
+    rc = do_domctl(xch, &domctl);
+    return rc;
+}
+
+int xc_domain_bind_pt_irq(
+    xc_interface *xch,
+    uint32_t domid,
+    uint8_t machine_irq,
+    uint8_t irq_type,
+    uint8_t bus,
+    uint8_t device,
+    uint8_t intx,
+    uint8_t isa_irq)
+{
+    return xc_domain_bind_pt_irq_int(xch, domid, machine_irq, irq_type,
+                                     bus, device, intx, isa_irq, 0);
+}
+
+static int xc_domain_unbind_pt_irq_int(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t machine_irq,
+    uint8_t irq_type,
+    uint8_t bus,
+    uint8_t device,
+    uint8_t intx,
+    uint8_t isa_irq,
+    uint8_t spi)
+{
+    int rc;
+    struct xen_domctl_bind_pt_irq *bind;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
+    domctl.domain = domid;
+
+    bind = &(domctl.u.bind_pt_irq);
+    bind->irq_type = irq_type;
+    bind->machine_irq = machine_irq;
+    switch ( irq_type )
+    {
+    case PT_IRQ_TYPE_PCI:
+    case PT_IRQ_TYPE_MSI_TRANSLATE:
+        bind->u.pci.bus = bus;
+        bind->u.pci.device = device;
+        bind->u.pci.intx = intx;
+        break;
+    case PT_IRQ_TYPE_ISA:
+        bind->u.isa.isa_irq = isa_irq;
+        break;
+    case PT_IRQ_TYPE_SPI:
+        bind->u.spi.spi = spi;
+        break;
+    default:
+        errno = EINVAL;
+        return -1;
+    }
+
+    rc = do_domctl(xch, &domctl);
+    return rc;
+}
+
+int xc_domain_unbind_pt_irq(
+    xc_interface *xch,
+    uint32_t domid,
+    uint8_t machine_irq,
+    uint8_t irq_type,
+    uint8_t bus,
+    uint8_t device,
+    uint8_t intx,
+    uint8_t isa_irq)
+{
+    return xc_domain_unbind_pt_irq_int(xch, domid, machine_irq, irq_type,
+                                       bus, device, intx, isa_irq, 0);
+}
+
+int xc_domain_bind_pt_pci_irq(
+    xc_interface *xch,
+    uint32_t domid,
+    uint8_t machine_irq,
+    uint8_t bus,
+    uint8_t device,
+    uint8_t intx)
+{
+
+    return (xc_domain_bind_pt_irq(xch, domid, machine_irq,
+                                  PT_IRQ_TYPE_PCI, bus, device, intx, 0));
+}
+
+int xc_domain_bind_pt_isa_irq(
+    xc_interface *xch,
+    uint32_t domid,
+    uint8_t machine_irq)
+{
+
+    return (xc_domain_bind_pt_irq(xch, domid, machine_irq,
+                                  PT_IRQ_TYPE_ISA, 0, 0, 0, machine_irq));
+}
+
+int xc_domain_bind_pt_spi_irq(
+    xc_interface *xch,
+    uint32_t domid,
+    uint16_t vspi,
+    uint16_t spi)
+{
+    return (xc_domain_bind_pt_irq_int(xch, domid, vspi,
+                                      PT_IRQ_TYPE_SPI, 0, 0, 0, 0, spi));
+}
+
+int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
+                                uint32_t domid,
+                                uint16_t vspi,
+                                uint16_t spi)
+{
+    return (xc_domain_unbind_pt_irq_int(xch, domid, vspi,
+                                        PT_IRQ_TYPE_SPI, 0, 0, 0, 0, spi));
+}
+
+int xc_domain_memory_mapping(
+    xc_interface *xch,
+    uint32_t domid,
+    unsigned long first_gfn,
+    unsigned long first_mfn,
+    unsigned long nr_mfns,
+    uint32_t add_mapping)
+{
+    DECLARE_DOMCTL;
+    xc_dominfo_t info;
+    int ret = 0, rc;
+    unsigned long done = 0, nr, max_batch_sz;
+
+    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
+         info.domid != domid )
+    {
+        PERROR("Could not get info for domain");
+        return -EINVAL;
+    }
+    if ( !xc_core_arch_auto_translated_physmap(&info) )
+        return 0;
+
+    if ( !nr_mfns )
+        return 0;
+
+    domctl.cmd = XEN_DOMCTL_memory_mapping;
+    domctl.domain = domid;
+    domctl.u.memory_mapping.add_mapping = add_mapping;
+    max_batch_sz = nr_mfns;
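+    /*
+     * Try the whole range in one domctl.  If the hypervisor replies E2BIG,
+     * halve the batch size and retry; a positive return value indicates
+     * partial progress and advances 'done'.
+     */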
+    do
+    {
+        nr = min_t(unsigned long, nr_mfns - done, max_batch_sz);
+        domctl.u.memory_mapping.nr_mfns = nr;
+        domctl.u.memory_mapping.first_gfn = first_gfn + done;
+        domctl.u.memory_mapping.first_mfn = first_mfn + done;
+        rc = do_domctl(xch, &domctl);
+        if ( rc < 0 && errno == E2BIG )
+        {
+            if ( max_batch_sz <= 1 )
+                break;
+            max_batch_sz >>= 1;
+            continue;
+        }
+        if ( rc > 0 )
+        {
+            done += rc;
+            continue;
+        }
+        /* Save the first error... */
+        if ( !ret )
+            ret = rc;
+        /* .. and ignore the rest of them when removing. */
+        if ( rc && add_mapping != DPCI_REMOVE_MAPPING )
+            break;
+
+        done += nr;
+    } while ( done < nr_mfns );
+
+    /*
+     * On failure, unless we were removing mappings, undo what we have done
+     * by unmapping the entire region.  Errors here are ignored.
+     */
+    if ( ret && add_mapping != DPCI_REMOVE_MAPPING )
+        xc_domain_memory_mapping(xch, domid, first_gfn, first_mfn, nr_mfns,
+                                 DPCI_REMOVE_MAPPING);
+
+    /* We might get E2BIG so many times that we never advance. */
+    if ( !done && !ret )
+        ret = -1;
+
+    return ret;
+}
+
+int xc_domain_ioport_mapping(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t first_gport,
+    uint32_t first_mport,
+    uint32_t nr_ports,
+    uint32_t add_mapping)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_ioport_mapping;
+    domctl.domain = domid;
+    domctl.u.ioport_mapping.first_gport = first_gport;
+    domctl.u.ioport_mapping.first_mport = first_mport;
+    domctl.u.ioport_mapping.nr_ports = nr_ports;
+    domctl.u.ioport_mapping.add_mapping = add_mapping;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_set_target(
+    xc_interface *xch,
+    uint32_t domid,
+    uint32_t target)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_set_target;
+    domctl.domain = domid;
+    domctl.u.set_target.target = target;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_subscribe_for_suspend(
+    xc_interface *xch, uint32_t dom, evtchn_port_t port)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_subscribe;
+    domctl.domain = dom;
+    domctl.u.subscribe.port = port;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_debug_control(xc_interface *xc, uint32_t domid, uint32_t sop, uint32_t vcpu)
+{
+    DECLARE_DOMCTL;
+
+    memset(&domctl, 0, sizeof(domctl));
+    domctl.domain = domid;
+    domctl.cmd = XEN_DOMCTL_debug_op;
+    domctl.u.debug_op.op     = sop;
+    domctl.u.debug_op.vcpu   = vcpu;
+
+    return do_domctl(xc, &domctl);
+}
+
+int xc_domain_p2m_audit(xc_interface *xch, 
+                        uint32_t domid,
+                        uint64_t *orphans,
+                        uint64_t *m2p_bad,   
+                        uint64_t *p2m_bad)
+{
+    DECLARE_DOMCTL;
+    int rc;
+
+    domctl.cmd = XEN_DOMCTL_audit_p2m;
+    domctl.domain = domid;
+    rc = do_domctl(xch, &domctl);
+
+    *orphans = domctl.u.audit_p2m.orphans;
+    *m2p_bad = domctl.u.audit_p2m.m2p_bad;
+    *p2m_bad = domctl.u.audit_p2m.p2m_bad;
+
+    return rc;
+}
+
+int xc_domain_set_access_required(xc_interface *xch,
+                                  uint32_t domid,
+                                  unsigned int required)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_set_access_required;
+    domctl.domain = domid;
+    domctl.u.access_required.access_required = required;
+    return do_domctl(xch, &domctl);
+}
+
+int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_set_virq_handler;
+    domctl.domain = domid;
+    domctl.u.set_virq_handler.virq = virq;
+    return do_domctl(xch, &domctl);
+}
+
+/* Plumbing Xen with vNUMA topology */
+int xc_domain_setvnuma(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t nr_vnodes,
+                       uint32_t nr_vmemranges,
+                       uint32_t nr_vcpus,
+                       xen_vmemrange_t *vmemrange,
+                       unsigned int *vdistance,
+                       unsigned int *vcpu_to_vnode,
+                       unsigned int *vnode_to_pnode)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * nr_vmemranges,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
+                             nr_vnodes * nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) *
+                             nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    errno = EINVAL;
+
+    if ( nr_vnodes == 0 || nr_vmemranges == 0 || nr_vcpus == 0 )
+        return -1;
+
+    if ( !vdistance || !vcpu_to_vnode || !vmemrange || !vnode_to_pnode )
+    {
+        PERROR("%s: Can't set vnuma without initializing topology", __func__);
+        return -1;
+    }
+
+    if ( xc_hypercall_bounce_pre(xch, vmemrange)      ||
+         xc_hypercall_bounce_pre(xch, vdistance)      ||
+         xc_hypercall_bounce_pre(xch, vcpu_to_vnode)  ||
+         xc_hypercall_bounce_pre(xch, vnode_to_pnode) )
+    {
+        rc = -1;
+        goto vnumaset_fail;
+
+    }
+
+    set_xen_guest_handle(domctl.u.vnuma.vmemrange, vmemrange);
+    set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
+    set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
+    set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);
+
+    domctl.cmd = XEN_DOMCTL_setvnumainfo;
+    domctl.domain = domid;
+    domctl.u.vnuma.nr_vnodes = nr_vnodes;
+    domctl.u.vnuma.nr_vmemranges = nr_vmemranges;
+    domctl.u.vnuma.nr_vcpus = nr_vcpus;
+    domctl.u.vnuma.pad = 0;
+
+    rc = do_domctl(xch, &domctl);
+
+ vnumaset_fail:
+    xc_hypercall_bounce_post(xch, vmemrange);
+    xc_hypercall_bounce_post(xch, vdistance);
+    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+    xc_hypercall_bounce_post(xch, vnode_to_pnode);
+
+    return rc;
+}
+
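+/*
+ * Retrieve a domain's vNUMA topology.  On entry the nr_* parameters describe
+ * the sizes of the caller-supplied arrays; on return they are updated with
+ * the counts reported by the hypervisor.
+ */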
+int xc_domain_getvnuma(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t *nr_vnodes,
+                       uint32_t *nr_vmemranges,
+                       uint32_t *nr_vcpus,
+                       xen_vmemrange_t *vmemrange,
+                       unsigned int *vdistance,
+                       unsigned int *vcpu_to_vnode)
+{
+    int rc;
+    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * *nr_vmemranges,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
+                             *nr_vnodes * *nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * *nr_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    struct xen_vnuma_topology_info vnuma_topo;
+
+    if ( xc_hypercall_bounce_pre(xch, vmemrange)      ||
+         xc_hypercall_bounce_pre(xch, vdistance)      ||
+         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) )
+    {
+        rc = -1;
+        errno = ENOMEM;
+        goto vnumaget_fail;
+    }
+
+    set_xen_guest_handle(vnuma_topo.vmemrange.h, vmemrange);
+    set_xen_guest_handle(vnuma_topo.vdistance.h, vdistance);
+    set_xen_guest_handle(vnuma_topo.vcpu_to_vnode.h, vcpu_to_vnode);
+
+    vnuma_topo.nr_vnodes = *nr_vnodes;
+    vnuma_topo.nr_vcpus = *nr_vcpus;
+    vnuma_topo.nr_vmemranges = *nr_vmemranges;
+    vnuma_topo.domid = domid;
+    vnuma_topo.pad = 0;
+
+    rc = do_memory_op(xch, XENMEM_get_vnumainfo, &vnuma_topo,
+                      sizeof(vnuma_topo));
+
+    *nr_vnodes = vnuma_topo.nr_vnodes;
+    *nr_vcpus = vnuma_topo.nr_vcpus;
+    *nr_vmemranges = vnuma_topo.nr_vmemranges;
+
+ vnumaget_fail:
+    xc_hypercall_bounce_post(xch, vmemrange);
+    xc_hypercall_bounce_post(xch, vdistance);
+    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+
+    return rc;
+}
+
+int xc_domain_soft_reset(xc_interface *xch,
+                         uint32_t domid)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_soft_reset;
+    domctl.domain = domid;
+    return do_domctl(xch, &domctl);
+}
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_evtchn.c b/tools/libs/ctrl/xc_evtchn.c
new file mode 100644 (file)
index 0000000..614786d
--- /dev/null
@@ -0,0 +1,85 @@
+/******************************************************************************
+ * xc_evtchn.c
+ *
+ * API for manipulating and accessing inter-domain event channels.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2004, K A Fraser.
+ */
+
+#include "xc_private.h"
+
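+/*
+ * Bounce 'arg' and issue an event_channel_op hypercall.  When
+ * 'silently_fail' is set the error is not logged (only xc_evtchn_status()
+ * passes a non-zero value here).
+ */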
+static int do_evtchn_op(xc_interface *xch, int cmd, void *arg,
+                        size_t arg_size, int silently_fail)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BOUNCE(arg, arg_size, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, arg) )
+    {
+        PERROR("do_evtchn_op: bouncing arg failed");
+        goto out;
+    }
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_event_channel_op,
+                   cmd, HYPERCALL_BUFFER_AS_ARG(arg));
+    if ( ret < 0 && !silently_fail )
+        ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret);
+
+    xc_hypercall_bounce_post(xch, arg);
+ out:
+    return ret;
+}
+
+xc_evtchn_port_or_error_t
+xc_evtchn_alloc_unbound(xc_interface *xch,
+                        uint32_t dom,
+                        uint32_t remote_dom)
+{
+    int rc;
+    struct evtchn_alloc_unbound arg = {
+        .dom        = dom,
+        .remote_dom = remote_dom,
+    };
+
+    rc = do_evtchn_op(xch, EVTCHNOP_alloc_unbound, &arg, sizeof(arg), 0);
+    if ( rc == 0 )
+        rc = arg.port;
+
+    return rc;
+}
+
+int xc_evtchn_reset(xc_interface *xch,
+                    uint32_t dom)
+{
+    struct evtchn_reset arg = { .dom = dom };
+    return do_evtchn_op(xch, EVTCHNOP_reset, &arg, sizeof(arg), 0);
+}
+
+int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status)
+{
+    return do_evtchn_op(xch, EVTCHNOP_status, status,
+                        sizeof(*status), 1);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_evtchn_compat.c b/tools/libs/ctrl/xc_evtchn_compat.c
new file mode 100644 (file)
index 0000000..82baf14
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Compatibility shims for third-party consumers of the libxenctrl xc_evtchn
+ * functionality, which has been split out into the separate xenevtchn
+ * library.
+ */
+
+#include <xenevtchn.h>
+
+#define XC_WANT_COMPAT_EVTCHN_API
+#include "xenctrl.h"
+
+xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
+                          unsigned open_flags)
+{
+    return xenevtchn_open(logger, open_flags);
+}
+
+int xc_evtchn_close(xc_evtchn *xce)
+{
+    return xenevtchn_close(xce);
+}
+
+int xc_evtchn_fd(xc_evtchn *xce)
+{
+    return xenevtchn_fd(xce);
+}
+
+int xc_evtchn_notify(xc_evtchn *xce, evtchn_port_t port)
+{
+    return xenevtchn_notify(xce, port);
+}
+
+evtchn_port_or_error_t
+xc_evtchn_bind_unbound_port(xc_evtchn *xce, uint32_t domid)
+{
+    return xenevtchn_bind_unbound_port(xce, domid);
+}
+
+evtchn_port_or_error_t
+xc_evtchn_bind_interdomain(xc_evtchn *xce, uint32_t domid,
+                           evtchn_port_t remote_port)
+{
+    return xenevtchn_bind_interdomain(xce, domid, remote_port);
+}
+
+evtchn_port_or_error_t
+xc_evtchn_bind_virq(xc_evtchn *xce, unsigned int virq)
+{
+    return xenevtchn_bind_virq(xce, virq);
+}
+
+int xc_evtchn_unbind(xc_evtchn *xce, evtchn_port_t port)
+{
+    return xenevtchn_unbind(xce, port);
+}
+
+evtchn_port_or_error_t
+xc_evtchn_pending(xc_evtchn *xce)
+{
+    return xenevtchn_pending(xce);
+}
+
+int xc_evtchn_unmask(xc_evtchn *xce, evtchn_port_t port)
+{
+    return xenevtchn_unmask(xce, port);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_flask.c b/tools/libs/ctrl/xc_flask.c
new file mode 100644 (file)
index 0000000..c1652ba
--- /dev/null
@@ -0,0 +1,450 @@
+/******************************************************************************
+ * xc_flask.c
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+#include <unistd.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/ioctl.h>
+
+#define OCON_ISID    0    /* initial SIDs */
+#define OCON_PIRQ    1    /* physical irqs */
+#define OCON_IOPORT  2    /* io ports */
+#define OCON_IOMEM   3    /* io memory */
+#define OCON_DEVICE  4    /* pci devices */
+#define INITCONTEXTLEN  256
+
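+/*
+ * Bounce the flask op structure into hypercall-safe memory and issue the
+ * XSM hypercall.  All xc_flask_* helpers below go through this function.
+ */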
+int xc_flask_op(xc_interface *xch, xen_flask_op_t *op)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BOUNCE(op, sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    op->interface_version = XEN_FLASK_INTERFACE_VERSION;
+
+    if ( xc_hypercall_bounce_pre(xch, op) )
+    {
+        PERROR("Could not bounce memory for flask op hypercall");
+        goto out;
+    }
+
+    ret = xencall1(xch->xcall, __HYPERVISOR_xsm_op,
+                   HYPERCALL_BUFFER_AS_ARG(op));
+    if ( ret < 0 )
+    {
+        if ( errno == EACCES )
+            fprintf(stderr, "XSM operation failed!\n");
+    }
+
+    xc_hypercall_bounce_post(xch, op);
+
+ out:
+    return ret;
+}
+
+int xc_flask_load(xc_interface *xch, char *buf, uint32_t size)
+{
+    int err;
+    DECLARE_FLASK_OP;
+    DECLARE_HYPERCALL_BOUNCE(buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    if ( xc_hypercall_bounce_pre(xch, buf) )
+    {
+        PERROR("Could not bounce memory for flask op hypercall");
+        return -1;
+    }
+
+    op.cmd = FLASK_LOAD;
+    op.u.load.size = size;
+    set_xen_guest_handle(op.u.load.buffer, buf);
+    
+    err = xc_flask_op(xch, &op);
+
+    xc_hypercall_bounce_post(xch, buf);
+
+    return err;
+}
+
+int xc_flask_context_to_sid(xc_interface *xch, char *buf, uint32_t size, uint32_t *sid)
+{
+    int err;
+    DECLARE_FLASK_OP;
+    DECLARE_HYPERCALL_BOUNCE(buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, buf) )
+    {
+        PERROR("Could not bounce memory for flask op hypercall");
+        return -1;
+    }
+
+    op.cmd = FLASK_CONTEXT_TO_SID;
+    op.u.sid_context.size = size;
+    set_xen_guest_handle(op.u.sid_context.context, buf);
+    
+    err = xc_flask_op(xch, &op);
+
+    if ( !err )
+        *sid = op.u.sid_context.sid;
+
+    xc_hypercall_bounce_post(xch, buf);
+
+    return err;
+}
+
+int xc_flask_sid_to_context(xc_interface *xch, int sid, char *buf, uint32_t size)
+{
+    int err;
+    DECLARE_FLASK_OP;
+    DECLARE_HYPERCALL_BOUNCE(buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, buf) )
+    {
+        PERROR("Could not bounce memory for flask op hypercall");
+        return -1;
+    }
+
+    op.cmd = FLASK_SID_TO_CONTEXT;
+    op.u.sid_context.sid = sid;
+    op.u.sid_context.size = size;
+    set_xen_guest_handle(op.u.sid_context.context, buf);
+    
+    err = xc_flask_op(xch, &op);
+
+    xc_hypercall_bounce_post(xch, buf);
+   
+    return err;
+}
+
+int xc_flask_getenforce(xc_interface *xch)
+{
+    DECLARE_FLASK_OP;
+    op.cmd = FLASK_GETENFORCE;
+    
+    return xc_flask_op(xch, &op);
+}
+
+int xc_flask_setenforce(xc_interface *xch, int mode)
+{
+    DECLARE_FLASK_OP;
+    op.cmd = FLASK_SETENFORCE;
+    op.u.enforce.enforcing = mode;
+   
+    return xc_flask_op(xch, &op);
+}
+
+int xc_flask_getbool_byid(xc_interface *xch, int id, char *name, uint32_t size, int *curr, int *pend)
+{
+    int rv;
+    DECLARE_FLASK_OP;
+    DECLARE_HYPERCALL_BOUNCE(name, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, name) )
+    {
+        PERROR("Could not bounce memory for flask op hypercall");
+        return -1;
+    }
+
+    op.cmd = FLASK_GETBOOL;
+    op.u.boolean.bool_id = id;
+    op.u.boolean.size = size;
+    set_xen_guest_handle(op.u.boolean.name, name);
+
+    rv = xc_flask_op(xch, &op);
+
+    xc_hypercall_bounce_post(xch, name);
+
+    if ( rv )
+        return rv;
+    
+    if ( curr )
+        *curr = op.u.boolean.enforcing;
+    if ( pend )
+        *pend = op.u.boolean.pending;
+
+    return rv;
+}
+
+int xc_flask_getbool_byname(xc_interface *xch, char *name, int *curr, int *pend)
+{
+    int rv;
+    DECLARE_FLASK_OP;
+    DECLARE_HYPERCALL_BOUNCE(name, strlen(name), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, name) )
+    {
+        PERROR("Could not bounce memory for flask op hypercall");
+        return -1;
+    }
+
+    op.cmd = FLASK_GETBOOL;
+    op.u.boolean.bool_id = -1;
+    op.u.boolean.size = strlen(name);
+    set_xen_guest_handle(op.u.boolean.name, name);
+
+    rv = xc_flask_op(xch, &op);
+
+    xc_hypercall_bounce_post(xch, name);
+
+    if ( rv )
+        return rv;
+    
+    if ( curr )
+        *curr = op.u.boolean.enforcing;
+    if ( pend )
+        *pend = op.u.boolean.pending;
+
+    return rv;
+}
+
+int xc_flask_setbool(xc_interface *xch, char *name, int value, int commit)
+{
+    int rv;
+    DECLARE_FLASK_OP;
+    DECLARE_HYPERCALL_BOUNCE(name, strlen(name), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, name) )
+    {
+        PERROR("Could not bounce memory for flask op hypercall");
+        return -1;
+    }
+
+    op.cmd = FLASK_SETBOOL;
+    op.u.boolean.bool_id = -1;
+    op.u.boolean.new_value = value;
+    op.u.boolean.commit = 1;
+    op.u.boolean.size = strlen(name);
+    set_xen_guest_handle(op.u.boolean.name, name);
+
+    rv = xc_flask_op(xch, &op);
+
+    xc_hypercall_bounce_post(xch, name);
+
+    return rv;
+}
+
+
+static int xc_flask_add(xc_interface *xch, uint32_t ocon, uint64_t low, uint64_t high, char *scontext)
+{
+    uint32_t sid;
+    int err;
+    DECLARE_FLASK_OP;
+
+    err = xc_flask_context_to_sid(xch, scontext, strlen(scontext), &sid);
+    if ( err )
+        return err;
+
+    op.cmd = FLASK_ADD_OCONTEXT;
+    op.u.ocontext.ocon = ocon;
+    op.u.ocontext.sid = sid;
+    op.u.ocontext.low = low;
+    op.u.ocontext.high = high;
+    
+    return xc_flask_op(xch, &op);
+}
+
+int xc_flask_add_pirq(xc_interface *xch, unsigned int pirq, char *scontext)
+{
+    return xc_flask_add(xch, OCON_PIRQ, pirq, pirq, scontext);
+}
+
+int xc_flask_add_ioport(xc_interface *xch, unsigned long low, unsigned long high,
+                      char *scontext)
+{
+    return xc_flask_add(xch, OCON_IOPORT, low, high, scontext);
+}
+
+int xc_flask_add_iomem(xc_interface *xch, unsigned long low, unsigned long high,
+                     char *scontext)
+{
+    return xc_flask_add(xch, OCON_IOMEM, low, high, scontext);
+}
+
+int xc_flask_add_device(xc_interface *xch, unsigned long device, char *scontext)
+{
+    return xc_flask_add(xch, OCON_DEVICE, device, device, scontext);
+}
+
+static int xc_flask_del(xc_interface *xch, uint32_t ocon, uint64_t low, uint64_t high)
+{
+    DECLARE_FLASK_OP;
+
+    op.cmd = FLASK_DEL_OCONTEXT;
+    op.u.ocontext.ocon = ocon;
+    op.u.ocontext.low = low;
+    op.u.ocontext.high = high;
+    
+    return xc_flask_op(xch, &op);
+}
+
+int xc_flask_del_pirq(xc_interface *xch, unsigned int pirq)
+{
+    return xc_flask_del(xch, OCON_PIRQ, pirq, pirq);
+}
+
+int xc_flask_del_ioport(xc_interface *xch, unsigned long low, unsigned long high)
+{
+    return xc_flask_del(xch, OCON_IOPORT, low, high);
+}
+
+int xc_flask_del_iomem(xc_interface *xch, unsigned long low, unsigned long high)
+{
+    return xc_flask_del(xch, OCON_IOMEM, low, high);
+}
+
+int xc_flask_del_device(xc_interface *xch, unsigned long device)
+{
+    return xc_flask_del(xch, OCON_DEVICE, device, device);
+}
+
+int xc_flask_access(xc_interface *xch, const char *scon, const char *tcon,
+                uint16_t tclass, uint32_t req,
+                uint32_t *allowed, uint32_t *decided,
+                uint32_t *auditallow, uint32_t *auditdeny,
+                uint32_t *seqno)
+{
+    DECLARE_FLASK_OP;
+    int err;
+
+    err = xc_flask_context_to_sid(xch, (char*)scon, strlen(scon), &op.u.access.ssid);
+    if ( err )
+        return err;
+    err = xc_flask_context_to_sid(xch, (char*)tcon, strlen(tcon), &op.u.access.tsid);
+    if ( err )
+        return err;
+
+    op.cmd = FLASK_ACCESS;
+    op.u.access.tclass = tclass;
+    op.u.access.req = req;
+    
+    err = xc_flask_op(xch, &op);
+
+    if ( err )
+        return err;
+
+    if ( allowed )
+        *allowed = op.u.access.allowed;
+    if ( decided )
+        *decided = 0xffffffff;
+    if ( auditallow )
+        *auditallow = op.u.access.audit_allow;
+    if ( auditdeny )
+        *auditdeny = op.u.access.audit_deny;
+    if ( seqno )
+        *seqno = op.u.access.seqno;
+
+    if ( (op.u.access.allowed & req) != req )
+        err = -EPERM;
+
+    return err;
+}
+
+int xc_flask_avc_hashstats(xc_interface *xch, char *buf, int size)
+{
+    int err;
+    DECLARE_FLASK_OP;
+  
+    op.cmd = FLASK_AVC_HASHSTATS;
+  
+    err = xc_flask_op(xch, &op);
+
+    snprintf(buf, size,
+             "entries: %d\nbuckets used: %d/%d\nlongest chain: %d\n",
+             op.u.hash_stats.entries, op.u.hash_stats.buckets_used,
+             op.u.hash_stats.buckets_total, op.u.hash_stats.max_chain_len);
+
+    return err;
+}
+
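+/*
+ * Dump per-CPU AVC cache statistics into 'buf', one line per CPU, stopping
+ * when the hypervisor returns ENOENT (no more CPUs) or 'buf' is full.
+ */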
+int xc_flask_avc_cachestats(xc_interface *xch, char *buf, int size)
+{
+    int err, n;
+    int i = 0;
+    DECLARE_FLASK_OP;
+
+    n = snprintf(buf, size, "lookups hits misses allocations reclaims frees\n");
+    buf += n;
+    size -= n;
+  
+    op.cmd = FLASK_AVC_CACHESTATS;
+    while ( size > 0 )
+    {
+        op.u.cache_stats.cpu = i;
+        err = xc_flask_op(xch, &op);
+        if ( err && errno == ENOENT )
+            return 0;
+        if ( err )
+            return err;
+        n = snprintf(buf, size, "%u %u %u %u %u %u\n",
+                     op.u.cache_stats.lookups, op.u.cache_stats.hits,
+                     op.u.cache_stats.misses, op.u.cache_stats.allocations,
+                     op.u.cache_stats.reclaims, op.u.cache_stats.frees);
+        buf += n;
+        size -= n;
+        i++;
+    }
+
+    return 0;
+}
+
+int xc_flask_policyvers(xc_interface *xch)
+{
+    DECLARE_FLASK_OP;
+    op.cmd = FLASK_POLICYVERS;
+
+    return xc_flask_op(xch, &op);
+}
+
+int xc_flask_getavc_threshold(xc_interface *xch)
+{
+    DECLARE_FLASK_OP;
+    op.cmd = FLASK_GETAVC_THRESHOLD;
+    
+    return xc_flask_op(xch, &op);
+}
+
+int xc_flask_setavc_threshold(xc_interface *xch, int threshold)
+{
+    DECLARE_FLASK_OP;
+    op.cmd = FLASK_SETAVC_THRESHOLD;
+    op.u.setavc_threshold.threshold = threshold;
+
+    return xc_flask_op(xch, &op);
+}
+
+int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid)
+{
+    DECLARE_FLASK_OP;
+    op.cmd = FLASK_RELABEL_DOMAIN;
+    op.u.relabel.domid = domid;
+    op.u.relabel.sid = sid;
+
+    return xc_flask_op(xch, &op);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_foreign_memory.c b/tools/libs/ctrl/xc_foreign_memory.c
new file mode 100644 (file)
index 0000000..4053d26
--- /dev/null
@@ -0,0 +1,98 @@
+/******************************************************************************
+ * xc_foreign_memory.c
+ *
+ * Functions for mapping foreign domain's memory.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define XC_BUILDING_COMPAT_MAP_FOREIGN_API
+#include "xc_private.h"
+
+void *xc_map_foreign_pages(xc_interface *xch, uint32_t dom, int prot,
+                           const xen_pfn_t *arr, int num)
+{
+    if (num < 0) {
+        errno = EINVAL;
+        return NULL;
+    }
+
+    return xenforeignmemory_map(xch->fmem, dom, prot, num, arr, NULL);
+}
+
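+/*
+ * Map 'size' bytes of machine memory starting at 'mfn' by expanding the
+ * contiguous range into a pfn array and forwarding it to
+ * xc_map_foreign_pages().
+ */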
+void *xc_map_foreign_range(xc_interface *xch,
+                           uint32_t dom, int size, int prot,
+                           unsigned long mfn)
+{
+    xen_pfn_t *arr;
+    int num;
+    int i;
+    void *ret;
+
+    num = (size + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
+    arr = calloc(num, sizeof(xen_pfn_t));
+    if ( arr == NULL )
+        return NULL;
+
+    for ( i = 0; i < num; i++ )
+        arr[i] = mfn + i;
+
+    ret = xc_map_foreign_pages(xch, dom, prot, arr, num);
+    free(arr);
+    return ret;
+}
+
+void *xc_map_foreign_ranges(xc_interface *xch,
+                            uint32_t dom, size_t size,
+                            int prot, size_t chunksize,
+                            privcmd_mmap_entry_t entries[],
+                            int nentries)
+{
+    xen_pfn_t *arr;
+    int num_per_entry;
+    int num;
+    int i;
+    int j;
+    void *ret;
+
+    num_per_entry = chunksize >> XC_PAGE_SHIFT;
+    num = num_per_entry * nentries;
+    arr = calloc(num, sizeof(xen_pfn_t));
+    if ( arr == NULL )
+        return NULL;
+
+    for ( i = 0; i < nentries; i++ )
+        for ( j = 0; j < num_per_entry; j++ )
+            arr[i * num_per_entry + j] = entries[i].mfn + j;
+
+    ret = xc_map_foreign_pages(xch, dom, prot, arr, num);
+    free(arr);
+    return ret;
+}
+
+void *xc_map_foreign_bulk(xc_interface *xch, uint32_t dom, int prot,
+                          const xen_pfn_t *arr, int *err, unsigned int num)
+{
+    return xenforeignmemory_map(xch->fmem, dom, prot, num, arr, err);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_freebsd.c b/tools/libs/ctrl/xc_freebsd.c
new file mode 100644 (file)
index 0000000..9dd48a3
--- /dev/null
@@ -0,0 +1,71 @@
+/******************************************************************************
+ *
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+/* Optionally flush file to disk and discard page cache */
+void discard_file_cache(xc_interface *xch, int fd, int flush)
+{
+    off_t cur = 0;
+    int saved_errno = errno;
+
+    if ( flush && (fsync(fd) < 0) )
+        goto out;
+
+    /*
+     * Calculate the last page boundary of the amount written so far, unless
+     * we are flushing, in which case the entire cache is discarded.
+     */
+    if ( !flush )
+    {
+        if ( (cur = lseek(fd, 0, SEEK_CUR)) == (off_t)-1 )
+            cur = 0;
+        cur &= ~(XC_PAGE_SIZE-1);
+    }
+
+    /* Discard from the buffer cache. */
+    if ( posix_fadvise(fd, 0, cur, POSIX_FADV_DONTNEED) < 0 )
+        goto out;
+
+ out:
+    errno = saved_errno;
+}
+
+void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
+{
+    int ret;
+    void *ptr;
+
+    ret = posix_memalign(&ptr, alignment, size);
+    if ( ret != 0 || !ptr )
+        return NULL;
+
+    return ptr;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_gnttab.c b/tools/libs/ctrl/xc_gnttab.c
new file mode 100644 (file)
index 0000000..eb92d89
--- /dev/null
@@ -0,0 +1,161 @@
+/******************************************************************************
+ *
+ * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+int xc_gnttab_op(xc_interface *xch, int cmd, void * op, int op_size, int count)
+{
+    int ret = 0;
+    DECLARE_HYPERCALL_BOUNCE(op, count * op_size, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, op) )
+    {
+        PERROR("Could not bounce buffer for grant table op hypercall");
+        goto out1;
+    }
+
+    ret = xencall3(xch->xcall,  __HYPERVISOR_grant_table_op,
+                   cmd, HYPERCALL_BUFFER_AS_ARG(op), count);
+
+    xc_hypercall_bounce_post(xch, op);
+
+ out1:
+    return ret;
+}
+
+int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query)
+{
+    int rc;
+
+    rc = xc_gnttab_op(xch, GNTTABOP_query_size, query, sizeof(*query), 1);
+
+    if ( rc || (query->status != GNTST_okay) )
+        ERROR("Could not query dom %u's grant size\n", query->dom);
+
+    return rc;
+}
+
+int xc_gnttab_get_version(xc_interface *xch, uint32_t domid)
+{
+    struct gnttab_get_version query;
+    int rc;
+
+    query.dom = domid;
+    rc = xc_gnttab_op(xch, GNTTABOP_get_version, &query, sizeof(query),
+                      1);
+    if ( rc < 0 )
+        return rc;
+    else
+        return query.version;
+}
+
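+/*
+ * Map a domain's grant table read-only: query its size, fetch the frame
+ * list via GNTTABOP_setup_table and map those frames locally.  *gnt_num is
+ * set to the number of v1-sized entries the mapped area holds.
+ */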
+static void *_gnttab_map_table(xc_interface *xch, uint32_t domid, int *gnt_num)
+{
+    int rc, i;
+    struct gnttab_query_size query;
+    struct gnttab_setup_table setup;
+    DECLARE_HYPERCALL_BUFFER(unsigned long, frame_list);
+    xen_pfn_t *pfn_list = NULL;
+    grant_entry_v1_t *gnt = NULL;
+
+    if ( !gnt_num )
+        return NULL;
+
+    query.dom = domid;
+    rc = xc_gnttab_op(xch, GNTTABOP_query_size, &query, sizeof(query), 1);
+
+    if ( rc || (query.status != GNTST_okay) )
+    {
+        ERROR("Could not query dom%d's grant size\n", domid);
+        return NULL;
+    }
+
+    *gnt_num = query.nr_frames * (PAGE_SIZE / sizeof(grant_entry_v1_t) );
+
+    frame_list = xc_hypercall_buffer_alloc(xch, frame_list, query.nr_frames * sizeof(unsigned long));
+    if ( !frame_list )
+    {
+        ERROR("Could not allocate frame_list in xc_gnttab_map_table\n");
+        return NULL;
+    }
+
+    pfn_list = malloc(query.nr_frames * sizeof(xen_pfn_t));
+    if ( !pfn_list )
+    {
+        ERROR("Could not allocate pfn_list in xc_gnttab_map_table\n");
+        goto err;
+    }
+
+    setup.dom = domid;
+    setup.nr_frames = query.nr_frames;
+    set_xen_guest_handle(setup.frame_list, frame_list);
+
+    /* XXX Any race with other setup_table hypercall? */
+    rc = xc_gnttab_op(xch, GNTTABOP_setup_table, &setup, sizeof(setup),
+                      1);
+
+    if ( rc || (setup.status != GNTST_okay) )
+    {
+        ERROR("Could not get grant table frame list\n");
+        goto err;
+    }
+
+    for ( i = 0; i < setup.nr_frames; i++ )
+        pfn_list[i] = frame_list[i];
+
+    gnt = xc_map_foreign_pages(xch, domid, PROT_READ, pfn_list,
+                               setup.nr_frames);
+    if ( !gnt )
+    {
+        ERROR("Could not map grant table\n");
+        goto err;
+    }
+
+err:
+    if ( frame_list )
+        xc_hypercall_buffer_free(xch, frame_list);
+    free(pfn_list);
+
+    return gnt;
+}
+
+grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid,
+                                         int *gnt_num)
+{
+    if (xc_gnttab_get_version(xch, domid) == 2)
+        return NULL;
+    return _gnttab_map_table(xch, domid, gnt_num);
+}
+
+grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid,
+                                         int *gnt_num)
+{
+    if (xc_gnttab_get_version(xch, domid) != 2)
+        return NULL;
+    return _gnttab_map_table(xch, domid, gnt_num);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_gnttab_compat.c b/tools/libs/ctrl/xc_gnttab_compat.c
new file mode 100644 (file)
index 0000000..6f036d8
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Compatibility shims for third-party consumers of the libxenctrl
+ * xc_gnt{tab,shr} functionality, which has been split out into the separate
+ * xengnttab library.
+ */
+
+#include <xengnttab.h>
+
+#define XC_WANT_COMPAT_GNTTAB_API
+#include "xenctrl.h"
+
+xc_gnttab *xc_gnttab_open(xentoollog_logger *logger,
+                          unsigned open_flags)
+{
+    return xengnttab_open(logger, open_flags);
+}
+
+int xc_gnttab_close(xc_gnttab *xcg)
+{
+    return xengnttab_close(xcg);
+}
+
+void *xc_gnttab_map_grant_ref(xc_gnttab *xcg,
+                              uint32_t domid,
+                              uint32_t ref,
+                              int prot)
+{
+    return xengnttab_map_grant_ref(xcg, domid, ref, prot);
+}
+
+void *xc_gnttab_map_grant_refs(xc_gnttab *xcg,
+                               uint32_t count,
+                               uint32_t *domids,
+                               uint32_t *refs,
+                               int prot)
+{
+    return xengnttab_map_grant_refs(xcg, count, domids, refs, prot);
+}
+
+void *xc_gnttab_map_domain_grant_refs(xc_gnttab *xcg,
+                                      uint32_t count,
+                                      uint32_t domid,
+                                      uint32_t *refs,
+                                      int prot)
+{
+    return xengnttab_map_domain_grant_refs(xcg, count, domid, refs, prot);
+}
+
+void *xc_gnttab_map_grant_ref_notify(xc_gnttab *xcg,
+                                     uint32_t domid,
+                                     uint32_t ref,
+                                     int prot,
+                                     uint32_t notify_offset,
+                                     evtchn_port_t notify_port)
+{
+    return xengnttab_map_grant_ref_notify(xcg, domid, ref, prot,
+                                          notify_offset, notify_port);
+}
+
+int xc_gnttab_munmap(xc_gnttab *xcg,
+                     void *start_address,
+                     uint32_t count)
+{
+    return xengnttab_unmap(xcg, start_address, count);
+}
+
+int xc_gnttab_set_max_grants(xc_gnttab *xcg,
+                             uint32_t count)
+{
+    return xengnttab_set_max_grants(xcg, count);
+}
+
+xc_gntshr *xc_gntshr_open(xentoollog_logger *logger,
+                          unsigned open_flags)
+{
+    return xengntshr_open(logger, open_flags);
+}
+
+int xc_gntshr_close(xc_gntshr *xcg)
+{
+    return xengntshr_close(xcg);
+}
+
+void *xc_gntshr_share_pages(xc_gntshr *xcg, uint32_t domid,
+                            int count, uint32_t *refs, int writable)
+{
+    return xengntshr_share_pages(xcg, domid, count, refs, writable);
+}
+
+void *xc_gntshr_share_page_notify(xc_gntshr *xcg, uint32_t domid,
+                                  uint32_t *ref, int writable,
+                                  uint32_t notify_offset,
+                                  evtchn_port_t notify_port)
+{
+    return xengntshr_share_page_notify(xcg, domid, ref, writable,
+                                       notify_offset, notify_port);
+}
+
+int xc_gntshr_munmap(xc_gntshr *xcg, void *start_address, uint32_t count)
+{
+    return xengntshr_unshare(xcg, start_address, count);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_hcall_buf.c b/tools/libs/ctrl/xc_hcall_buf.c
new file mode 100644 (file)
index 0000000..200671f
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2010, Citrix Systems, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "xc_private.h"
+
+xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL) = {
+    .hbuf = NULL,
+    .param_shadow = NULL,
+    HYPERCALL_BUFFER_INIT_NO_BOUNCE
+};
+
+void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages)
+{
+    void *p = xencall_alloc_buffer_pages(xch->xcall, nr_pages);
+
+    if (!p)
+        return NULL;
+
+    b->hbuf = p;
+
+    return b->hbuf;
+}
+
+void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages)
+{
+    xencall_free_buffer_pages(xch->xcall, b->hbuf, nr_pages);
+}
+
+void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size)
+{
+    void *p = xencall_alloc_buffer(xch->xcall, size);
+
+    if (!p)
+        return NULL;
+
+    b->hbuf = p;
+
+    return b->hbuf;
+}
+
+void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b)
+{
+    xencall_free_buffer(xch->xcall, b->hbuf);
+}
+
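+/*
+ * Copy a caller buffer into hypercall-safe memory ahead of a hypercall for
+ * BOUNCE_IN/BOUNCE_BOTH directions.  xc__hypercall_bounce_post() copies the
+ * result back for BOUNCE_OUT/BOUNCE_BOTH and frees the bounce buffer.
+ */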
+int xc__hypercall_bounce_pre(xc_interface *xch, xc_hypercall_buffer_t *b)
+{
+    void *p;
+
+    /*
+     * Catch hypercall buffer declared other than with DECLARE_HYPERCALL_BOUNCE.
+     */
+    if ( b->ubuf == (void *)-1 || b->dir == XC_HYPERCALL_BUFFER_BOUNCE_NONE )
+        abort();
+
+    /*
+     * Don't need to bounce a NULL buffer.
+     */
+    if ( b->ubuf == NULL )
+    {
+        b->hbuf = NULL;
+        return 0;
+    }
+
+    p = xc__hypercall_buffer_alloc(xch, b, b->sz);
+    if ( p == NULL )
+        return -1;
+
+    if ( b->dir == XC_HYPERCALL_BUFFER_BOUNCE_IN || b->dir == XC_HYPERCALL_BUFFER_BOUNCE_BOTH )
+        memcpy(b->hbuf, b->ubuf, b->sz);
+
+    return 0;
+}
+
+void xc__hypercall_bounce_post(xc_interface *xch, xc_hypercall_buffer_t *b)
+{
+    /*
+     * Catch hypercall buffer declared other than with DECLARE_HYPERCALL_BOUNCE.
+     */
+    if ( b->ubuf == (void *)-1 || b->dir == XC_HYPERCALL_BUFFER_BOUNCE_NONE )
+        abort();
+
+    if ( b->hbuf == NULL )
+        return;
+
+    if ( b->dir == XC_HYPERCALL_BUFFER_BOUNCE_OUT || b->dir == XC_HYPERCALL_BUFFER_BOUNCE_BOTH )
+        memcpy(b->ubuf, b->hbuf, b->sz);
+
+    xc__hypercall_buffer_free(xch, b);
+}
+
+struct xc_hypercall_buffer_array {
+    unsigned max_bufs;
+    xc_hypercall_buffer_t *bufs;
+};
+
+xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch,
+                                                              unsigned n)
+{
+    xc_hypercall_buffer_array_t *array;
+    xc_hypercall_buffer_t *bufs = NULL;
+
+    array = malloc(sizeof(*array));
+    if ( array == NULL )
+        goto error;
+
+    bufs = calloc(n, sizeof(*bufs));
+    if ( bufs == NULL )
+        goto error;
+
+    array->max_bufs = n;
+    array->bufs     = bufs;
+
+    return array;
+
+error:
+    free(bufs);
+    free(array);
+    return NULL;
+}
+
+void *xc__hypercall_buffer_array_alloc(xc_interface *xch,
+                                       xc_hypercall_buffer_array_t *array,
+                                       unsigned index,
+                                       xc_hypercall_buffer_t *hbuf,
+                                       size_t size)
+{
+    void *buf;
+
+    if ( index >= array->max_bufs || array->bufs[index].hbuf )
+        abort();
+
+    buf = xc__hypercall_buffer_alloc(xch, hbuf, size);
+    if ( buf )
+        array->bufs[index] = *hbuf;
+    return buf;
+}
+
+void *xc__hypercall_buffer_array_get(xc_interface *xch,
+                                     xc_hypercall_buffer_array_t *array,
+                                     unsigned index,
+                                     xc_hypercall_buffer_t *hbuf)
+{
+    if ( index >= array->max_bufs || array->bufs[index].hbuf == NULL )
+        abort();
+
+    *hbuf = array->bufs[index];
+    return array->bufs[index].hbuf;
+}
+
+void xc_hypercall_buffer_array_destroy(xc_interface *xc,
+                                       xc_hypercall_buffer_array_t *array)
+{
+    unsigned i;
+
+    if ( array == NULL )
+        return;
+
+    for (i = 0; i < array->max_bufs; i++ )
+        xc__hypercall_buffer_free(xc, &array->bufs[i]);
+    free(array->bufs);
+    free(array);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_kexec.c b/tools/libs/ctrl/xc_kexec.c
new file mode 100644 (file)
index 0000000..a4e8966
--- /dev/null
@@ -0,0 +1,152 @@
+/******************************************************************************
+ * xc_kexec.c
+ *
+ * API for loading and executing kexec images.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * Copyright (C) 2013 Citrix Systems R&D Ltd.
+ */
+#include "xc_private.h"
+
+int xc_kexec_exec(xc_interface *xch, int type)
+{
+    DECLARE_HYPERCALL_BUFFER(xen_kexec_exec_t, exec);
+    int ret = -1;
+
+    exec = xc_hypercall_buffer_alloc(xch, exec, sizeof(*exec));
+    if ( exec == NULL )
+    {
+        PERROR("Could not alloc bounce buffer for kexec_exec hypercall");
+        goto out;
+    }
+
+    exec->type = type;
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
+                   KEXEC_CMD_kexec,
+                   HYPERCALL_BUFFER_AS_ARG(exec));
+
+out:
+    xc_hypercall_buffer_free(xch, exec);
+
+    return ret;
+}
+
+int xc_kexec_get_range(xc_interface *xch, int range,  int nr,
+                       uint64_t *size, uint64_t *start)
+{
+    DECLARE_HYPERCALL_BUFFER(xen_kexec_range_t, get_range);
+    int ret = -1;
+
+    get_range = xc_hypercall_buffer_alloc(xch, get_range, sizeof(*get_range));
+    if ( get_range == NULL )
+    {
+        PERROR("Could not alloc bounce buffer for kexec_get_range hypercall");
+        goto out;
+    }
+
+    get_range->range = range;
+    get_range->nr = nr;
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
+                   KEXEC_CMD_kexec_get_range,
+                   HYPERCALL_BUFFER_AS_ARG(get_range));
+
+    *size = get_range->size;
+    *start = get_range->start;
+
+out:
+    xc_hypercall_buffer_free(xch, get_range);
+
+    return ret;
+}
+
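+/*
+ * Load a kexec image: the caller's segment array is bounced into
+ * hypercall-safe memory and referenced from the xen_kexec_load_t argument.
+ */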
+int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
+                  uint64_t entry_maddr,
+                  uint32_t nr_segments, xen_kexec_segment_t *segments)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BOUNCE(segments, sizeof(*segments) * nr_segments,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BUFFER(xen_kexec_load_t, load);
+
+    if ( xc_hypercall_bounce_pre(xch, segments) )
+    {
+        PERROR("Could not allocate bounce buffer for kexec load hypercall");
+        goto out;
+    }
+    load = xc_hypercall_buffer_alloc(xch, load, sizeof(*load));
+    if ( load == NULL )
+    {
+        PERROR("Could not allocate buffer for kexec load hypercall");
+        goto out;
+    }
+
+    load->type = type;
+    load->arch = arch;
+    load->entry_maddr = entry_maddr;
+    load->nr_segments = nr_segments;
+    set_xen_guest_handle(load->segments.h, segments);
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
+                   KEXEC_CMD_kexec_load,
+                   HYPERCALL_BUFFER_AS_ARG(load));
+
+out:
+    xc_hypercall_buffer_free(xch, load);
+    xc_hypercall_bounce_post(xch, segments);
+
+    return ret;
+}
+
+int xc_kexec_unload(xc_interface *xch, int type)
+{
+    DECLARE_HYPERCALL_BUFFER(xen_kexec_unload_t, unload);
+    int ret = -1;
+
+    unload = xc_hypercall_buffer_alloc(xch, unload, sizeof(*unload));
+    if ( unload == NULL )
+    {
+        PERROR("Could not alloc buffer for kexec unload hypercall");
+        goto out;
+    }
+
+    unload->type = type;
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
+                   KEXEC_CMD_kexec_unload,
+                   HYPERCALL_BUFFER_AS_ARG(unload));
+
+out:
+    xc_hypercall_buffer_free(xch, unload);
+
+    return ret;
+}
+
+int xc_kexec_status(xc_interface *xch, int type)
+{
+    DECLARE_HYPERCALL_BUFFER(xen_kexec_status_t, status);
+    int ret = -1;
+
+    status = xc_hypercall_buffer_alloc(xch, status, sizeof(*status));
+    if ( status == NULL )
+    {
+        PERROR("Could not alloc buffer for kexec status hypercall");
+        goto out;
+    }
+
+    status->type = type;
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
+                   KEXEC_CMD_kexec_status,
+                   HYPERCALL_BUFFER_AS_ARG(status));
+
+out:
+    xc_hypercall_buffer_free(xch, status);
+
+    return ret;
+}
diff --git a/tools/libs/ctrl/xc_linux.c b/tools/libs/ctrl/xc_linux.c
new file mode 100644 (file)
index 0000000..c67c71c
--- /dev/null
@@ -0,0 +1,77 @@
+/******************************************************************************
+ *
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+/* Optionally flush file to disk and discard page cache */
+void discard_file_cache(xc_interface *xch, int fd, int flush) 
+{
+    off_t cur = 0;
+    int saved_errno = errno;
+
+    if ( flush && (fsync(fd) < 0) )
+    {
+        /*PERROR("Failed to flush file: %s", strerror(errno));*/
+        goto out;
+    }
+
+    /*
+     * Calculate the last page boundary of the amount written so far, unless
+     * we are flushing, in which case the entire cache is discarded.
+     */
+    if ( !flush )
+    {
+        if ( (cur = lseek(fd, 0, SEEK_CUR)) == (off_t)-1 )
+            cur = 0;
+        cur &= ~(XC_PAGE_SIZE-1);
+    }
+
+    /* Discard from the buffer cache. */
+    if ( posix_fadvise64(fd, 0, cur, POSIX_FADV_DONTNEED) < 0 )
+    {
+        /*PERROR("Failed to discard cache: %s", strerror(errno));*/
+        goto out;
+    }
+
+ out:
+    errno = saved_errno;
+}
+
+void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
+{
+    int ret;
+    void *ptr;
+
+    ret = posix_memalign(&ptr, alignment, size);
+    if (ret != 0 || !ptr)
+        return NULL;
+
+    return ptr;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_mem_access.c b/tools/libs/ctrl/xc_mem_access.c
new file mode 100644 (file)
index 0000000..b452460
--- /dev/null
@@ -0,0 +1,110 @@
+/******************************************************************************
+ *
+ * tools/libs/ctrl/xc_mem_access.c
+ *
+ * Interface to low-level memory access mode functionality
+ *
+ * Copyright (c) 2011 Virtuata, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+#include <xen/memory.h>
+
+int xc_set_mem_access(xc_interface *xch,
+                      uint32_t domain_id,
+                      xenmem_access_t access,
+                      uint64_t first_pfn,
+                      uint32_t nr)
+{
+    xen_mem_access_op_t mao =
+    {
+        .op     = XENMEM_access_op_set_access,
+        .domid  = domain_id,
+        .access = access,
+        .pfn    = first_pfn,
+        .nr     = nr
+    };
+
+    return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+}
+
+int xc_set_mem_access_multi(xc_interface *xch,
+                            uint32_t domain_id,
+                            uint8_t *access,
+                            uint64_t *pages,
+                            uint32_t nr)
+{
+    DECLARE_HYPERCALL_BOUNCE(access, nr, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(uint64_t),
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    int rc;
+
+    xen_mem_access_op_t mao =
+    {
+        .op       = XENMEM_access_op_set_access_multi,
+        .domid    = domain_id,
+        .access   = XENMEM_access_default + 1, /* Invalid value */
+        .pfn      = ~0UL, /* Invalid GFN */
+        .nr       = nr,
+    };
+
+    if ( xc_hypercall_bounce_pre(xch, pages) ||
+         xc_hypercall_bounce_pre(xch, access) )
+    {
+        PERROR("Could not bounce memory for XENMEM_access_op_set_access_multi");
+        return -1;
+    }
+
+    set_xen_guest_handle(mao.pfn_list, pages);
+    set_xen_guest_handle(mao.access_list, access);
+
+    rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+
+    xc_hypercall_bounce_post(xch, access);
+    xc_hypercall_bounce_post(xch, pages);
+
+    return rc;
+}
+
+int xc_get_mem_access(xc_interface *xch,
+                      uint32_t domain_id,
+                      uint64_t pfn,
+                      xenmem_access_t *access)
+{
+    int rc;
+    xen_mem_access_op_t mao =
+    {
+        .op    = XENMEM_access_op_get_access,
+        .domid = domain_id,
+        .pfn   = pfn
+    };
+
+    rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+
+    if ( rc == 0 )
+        *access = mao.access;
+
+    return rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End: 
+ */
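
A usage sketch (not part of this commit): making one guest page read/execute only and reading the setting back. The domain id and gfn are placeholders, an open xc_interface handle is assumed, and error handling is abbreviated; write attempts are only reported if vm_event monitoring is also set up:

    #include <xenctrl.h>
    #include <stdio.h>

    /* Hypothetical values, for illustration only. */
    #define EXAMPLE_DOMID 5
    #define EXAMPLE_GFN   0x1000UL

    static int restrict_one_page(xc_interface *xch)
    {
        xenmem_access_t cur;

        /* Remove write permission from a single gfn. */
        if ( xc_set_mem_access(xch, EXAMPLE_DOMID, XENMEM_access_rx,
                               EXAMPLE_GFN, 1) )
            return -1;

        /* Read back what the hypervisor recorded for that gfn. */
        if ( xc_get_mem_access(xch, EXAMPLE_DOMID, EXAMPLE_GFN, &cur) )
            return -1;

        printf("gfn %#lx access type %d\n", (unsigned long)EXAMPLE_GFN, cur);
        return 0;
    }
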
diff --git a/tools/libs/ctrl/xc_mem_paging.c b/tools/libs/ctrl/xc_mem_paging.c
new file mode 100644 (file)
index 0000000..738f63a
--- /dev/null
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * tools/libxc/xc_mem_paging.c
+ *
+ * Interface to low-level memory paging functionality.
+ *
+ * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+static int xc_mem_paging_memop(xc_interface *xch, uint32_t domain_id,
+                               unsigned int op, uint64_t gfn, void *buffer)
+{
+    xen_mem_paging_op_t mpo;
+    DECLARE_HYPERCALL_BOUNCE(buffer, XC_PAGE_SIZE,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    int rc;
+
+    memset(&mpo, 0, sizeof(mpo));
+
+    mpo.op      = op;
+    mpo.domain  = domain_id;
+    mpo.gfn     = gfn;
+
+    if ( buffer )
+    {
+        if ( xc_hypercall_bounce_pre(xch, buffer) )
+        {
+            PERROR("Could not bounce memory for XENMEM_paging_op %u", op);
+            return -1;
+        }
+
+        set_xen_guest_handle(mpo.buffer, buffer);
+    }
+
+    rc = do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
+
+    if ( buffer )
+        xc_hypercall_bounce_post(xch, buffer);
+
+    return rc;
+}
+
+int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
+                         uint32_t *port)
+{
+    if ( !port )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    return xc_vm_event_control(xch, domain_id,
+                               XEN_VM_EVENT_ENABLE,
+                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                               port);
+}
+
+int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id)
+{
+    return xc_vm_event_control(xch, domain_id,
+                               XEN_VM_EVENT_DISABLE,
+                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                               NULL);
+}
+
+int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id)
+{
+    return xc_vm_event_control(xch, domain_id,
+                               XEN_VM_EVENT_RESUME,
+                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                               NULL);
+}
+
+int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
+{
+    return xc_mem_paging_memop(xch, domain_id,
+                               XENMEM_paging_op_nominate,
+                               gfn, NULL);
+}
+
+int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
+{
+    return xc_mem_paging_memop(xch, domain_id,
+                               XENMEM_paging_op_evict,
+                               gfn, NULL);
+}
+
+int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
+{
+    return xc_mem_paging_memop(xch, domain_id,
+                               XENMEM_paging_op_prep,
+                               gfn, NULL);
+}
+
+int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
+                       uint64_t gfn, void *buffer)
+{
+    errno = EINVAL;
+
+    if ( !buffer )
+        return -1;
+
+    return xc_mem_paging_memop(xch, domain_id, XENMEM_paging_op_prep,
+                               gfn, buffer);
+}
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End: 
+ */
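
A usage sketch (not part of this commit) of the usual page-out sequence (enable paging, nominate a gfn, then evict it); it assumes a pager elsewhere services the vm_event ring identified by the returned port, and error handling is abbreviated:

    #include <xenctrl.h>

    static int page_out_one(xc_interface *xch, uint32_t domid, uint64_t gfn)
    {
        uint32_t port;

        /* Enable paging; 'port' identifies the vm_event channel for the pager. */
        if ( xc_mem_paging_enable(xch, domid, &port) )
            return -1;

        /* Mark the gfn as pageable, then push its contents out of memory. */
        if ( xc_mem_paging_nominate(xch, domid, gfn) ||
             xc_mem_paging_evict(xch, domid, gfn) )
            return -1;

        /*
         * When the guest later touches the page, the pager brings the saved
         * contents back with xc_mem_paging_load() (or xc_mem_paging_prep()
         * when no contents need restoring) and completes the request.
         */
        return 0;
    }
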
diff --git a/tools/libs/ctrl/xc_memshr.c b/tools/libs/ctrl/xc_memshr.c
new file mode 100644 (file)
index 0000000..a6cfd7d
--- /dev/null
@@ -0,0 +1,289 @@
+/******************************************************************************
+ *
+ * xc_memshr.c
+ *
+ * Interface to low-level memory sharing functionality.
+ *
+ * Copyright (c) 2009 Citrix Systems, Inc. (Grzegorz Milos)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+#include <xen/memory.h>
+#include <xen/grant_table.h>
+
+int xc_memshr_control(xc_interface *xch,
+                      uint32_t domid,
+                      int enable)
+{
+    DECLARE_DOMCTL;
+    struct xen_domctl_mem_sharing_op *op;
+
+    domctl.cmd = XEN_DOMCTL_mem_sharing_op;
+    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
+    domctl.domain = domid;
+    op = &(domctl.u.mem_sharing_op);
+    op->op = XEN_DOMCTL_MEM_SHARING_CONTROL;
+    op->u.enable = enable;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_memshr_ring_enable(xc_interface *xch,
+                          uint32_t domid,
+                          uint32_t *port)
+{
+    if ( !port )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    return xc_vm_event_control(xch, domid,
+                               XEN_VM_EVENT_ENABLE,
+                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
+                               port);
+}
+
+int xc_memshr_ring_disable(xc_interface *xch,
+                           uint32_t domid)
+{
+    return xc_vm_event_control(xch, domid,
+                               XEN_VM_EVENT_DISABLE,
+                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
+                               NULL);
+}
+
+static int xc_memshr_memop(xc_interface *xch, uint32_t domid,
+                            xen_mem_sharing_op_t *mso)
+{
+    mso->domain = domid;
+
+    return do_memory_op(xch, XENMEM_sharing_op, mso, sizeof(*mso));
+}
+
+int xc_memshr_nominate_gfn(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned long gfn,
+                           uint64_t *handle)
+{
+    int rc;
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_nominate_gfn;
+    mso.u.nominate.u.gfn = gfn;
+
+    rc = xc_memshr_memop(xch, domid, &mso);
+
+    if ( !rc )
+        *handle = mso.u.nominate.handle;
+
+    return rc;
+}
+
+int xc_memshr_nominate_gref(xc_interface *xch,
+                            uint32_t domid,
+                            grant_ref_t gref,
+                            uint64_t *handle)
+{
+    int rc;
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_nominate_gref;
+    mso.u.nominate.u.grant_ref = gref;
+
+    rc = xc_memshr_memop(xch, domid, &mso);
+
+    if ( !rc )
+        *handle = mso.u.nominate.handle;
+
+    return rc;
+}
+
+int xc_memshr_share_gfns(xc_interface *xch,
+                         uint32_t source_domain,
+                         unsigned long source_gfn,
+                         uint64_t source_handle,
+                         uint32_t client_domain,
+                         unsigned long client_gfn,
+                         uint64_t client_handle)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_share;
+
+    mso.u.share.source_handle = source_handle;
+    mso.u.share.source_gfn    = source_gfn;
+    mso.u.share.client_domain = client_domain;
+    mso.u.share.client_gfn    = client_gfn;
+    mso.u.share.client_handle = client_handle;
+
+    return xc_memshr_memop(xch, source_domain, &mso);
+}
+
+int xc_memshr_share_grefs(xc_interface *xch,
+                          uint32_t source_domain,
+                          grant_ref_t source_gref,
+                          uint64_t source_handle,
+                          uint32_t client_domain,
+                          grant_ref_t client_gref,
+                          uint64_t client_handle)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_share;
+
+    mso.u.share.source_handle = source_handle;
+    XENMEM_SHARING_OP_FIELD_MAKE_GREF(mso.u.share.source_gfn, source_gref);
+    mso.u.share.client_domain = client_domain;
+    XENMEM_SHARING_OP_FIELD_MAKE_GREF(mso.u.share.client_gfn, client_gref);
+    mso.u.share.client_handle = client_handle;
+
+    return xc_memshr_memop(xch, source_domain, &mso);
+}
+
+int xc_memshr_add_to_physmap(xc_interface *xch,
+                    uint32_t source_domain,
+                    unsigned long source_gfn,
+                    uint64_t source_handle,
+                    uint32_t client_domain,
+                    unsigned long client_gfn)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_add_physmap;
+
+    mso.u.share.source_handle = source_handle;
+    mso.u.share.source_gfn    = source_gfn;
+    mso.u.share.client_domain = client_domain;
+    mso.u.share.client_gfn    = client_gfn;
+
+    return xc_memshr_memop(xch, source_domain, &mso);
+}
+
+int xc_memshr_range_share(xc_interface *xch,
+                          uint32_t source_domain,
+                          uint32_t client_domain,
+                          uint64_t first_gfn,
+                          uint64_t last_gfn)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_range_share;
+
+    mso.u.range.client_domain = client_domain;
+    mso.u.range.first_gfn = first_gfn;
+    mso.u.range.last_gfn = last_gfn;
+
+    return xc_memshr_memop(xch, source_domain, &mso);
+}
+
+int xc_memshr_domain_resume(xc_interface *xch,
+                            uint32_t domid)
+{
+    return xc_vm_event_control(xch, domid,
+                               XEN_VM_EVENT_RESUME,
+                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
+                               NULL);
+}
+
+int xc_memshr_debug_gfn(xc_interface *xch,
+                        uint32_t domid,
+                        unsigned long gfn)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_debug_gfn;
+    mso.u.debug.u.gfn = gfn;
+
+    return xc_memshr_memop(xch, domid, &mso);
+}
+
+int xc_memshr_debug_gref(xc_interface *xch,
+                         uint32_t domid,
+                         grant_ref_t gref)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_debug_gref;
+    mso.u.debug.u.gref = gref;
+
+    return xc_memshr_memop(xch, domid, &mso);
+}
+
+int xc_memshr_fork(xc_interface *xch, uint32_t pdomid, uint32_t domid,
+                   bool allow_with_iommu, bool block_interrupts)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_fork;
+    mso.u.fork.parent_domain = pdomid;
+
+    if ( allow_with_iommu )
+        mso.u.fork.flags |= XENMEM_FORK_WITH_IOMMU_ALLOWED;
+    if ( block_interrupts )
+        mso.u.fork.flags |= XENMEM_FORK_BLOCK_INTERRUPTS;
+
+    return xc_memshr_memop(xch, domid, &mso);
+}
+
+int xc_memshr_fork_reset(xc_interface *xch, uint32_t domid)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+    mso.op = XENMEM_sharing_op_fork_reset;
+
+    return xc_memshr_memop(xch, domid, &mso);
+}
+
+int xc_memshr_audit(xc_interface *xch)
+{
+    xen_mem_sharing_op_t mso;
+
+    memset(&mso, 0, sizeof(mso));
+
+    mso.op = XENMEM_sharing_op_audit;
+
+    return do_memory_op(xch, XENMEM_sharing_op, &mso, sizeof(mso));
+}
+
+long xc_sharing_freed_pages(xc_interface *xch)
+{
+    return do_memory_op(xch, XENMEM_get_sharing_freed_pages, NULL, 0);
+}
+
+long xc_sharing_used_frames(xc_interface *xch)
+{
+    return do_memory_op(xch, XENMEM_get_sharing_shared_pages, NULL, 0);
+}
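
A usage sketch (not part of this commit) for deduplicating one page between two domains: enable sharing, nominate both gfns, then share them using the returned handles. It assumes the caller has determined that the two pages hold identical contents, and abbreviates error handling:

    #include <xenctrl.h>

    static int share_one_page(xc_interface *xch,
                              uint32_t src_dom, unsigned long src_gfn,
                              uint32_t cli_dom, unsigned long cli_gfn)
    {
        uint64_t src_handle, cli_handle;

        /* Sharing must be switched on for every participating domain. */
        if ( xc_memshr_control(xch, src_dom, 1) ||
             xc_memshr_control(xch, cli_dom, 1) )
            return -1;

        /* Nominate both pages; the handles guard against racing changes. */
        if ( xc_memshr_nominate_gfn(xch, src_dom, src_gfn, &src_handle) ||
             xc_memshr_nominate_gfn(xch, cli_dom, cli_gfn, &cli_handle) )
            return -1;

        /* Collapse the client page onto the source page. */
        return xc_memshr_share_gfns(xch, src_dom, src_gfn, src_handle,
                                    cli_dom, cli_gfn, cli_handle);
    }
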
diff --git a/tools/libs/ctrl/xc_minios.c b/tools/libs/ctrl/xc_minios.c
new file mode 100644 (file)
index 0000000..1799daa
--- /dev/null
@@ -0,0 +1,67 @@
+/******************************************************************************
+ *
+ * Copyright 2007-2008 Samuel Thibault <samuel.thibault@eu.citrix.com>.
+ * All rights reserved.
+ * Use is subject to license terms.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#undef NDEBUG
+#include <mini-os/types.h>
+#include <mini-os/os.h>
+#include <mini-os/mm.h>
+#include <mini-os/lib.h>
+
+#include <xen/memory.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <assert.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <malloc.h>
+
+#include "xc_private.h"
+
+void minios_interface_close_fd(int fd);
+
+extern void minios_interface_close_fd(int fd);
+
+void minios_interface_close_fd(int fd)
+{
+    files[fd].type = FTYPE_NONE;
+}
+
+/* Optionally flush file to disk and discard page cache */
+void discard_file_cache(xc_interface *xch, int fd, int flush)
+{
+    if (flush)
+        fsync(fd);
+}
+
+void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
+{
+    return memalign(alignment, size);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_misc.c b/tools/libs/ctrl/xc_misc.c
new file mode 100644 (file)
index 0000000..3820394
--- /dev/null
@@ -0,0 +1,999 @@
+/******************************************************************************
+ * xc_misc.c
+ *
+ * Miscellaneous control interface functions.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_bitops.h"
+#include "xc_private.h"
+#include <xen/hvm/hvm_op.h>
+
+int xc_get_max_cpus(xc_interface *xch)
+{
+    static int max_cpus = 0;
+    xc_physinfo_t physinfo;
+
+    if ( max_cpus )
+        return max_cpus;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+    {
+        max_cpus = physinfo.max_cpu_id + 1;
+        return max_cpus;
+    }
+
+    return -1;
+}
+
+int xc_get_online_cpus(xc_interface *xch)
+{
+    xc_physinfo_t physinfo;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        return physinfo.nr_cpus;
+
+    return -1;
+}
+
+int xc_get_max_nodes(xc_interface *xch)
+{
+    static int max_nodes = 0;
+    xc_physinfo_t physinfo;
+
+    if ( max_nodes )
+        return max_nodes;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+    {
+        max_nodes = physinfo.max_node_id + 1;
+        return max_nodes;
+    }
+
+    return -1;
+}
+
+int xc_get_cpumap_size(xc_interface *xch)
+{
+    int max_cpus = xc_get_max_cpus(xch);
+
+    if ( max_cpus < 0 )
+        return -1;
+    return (max_cpus + 7) / 8;
+}
+
+int xc_get_nodemap_size(xc_interface *xch)
+{
+    int max_nodes = xc_get_max_nodes(xch);
+
+    if ( max_nodes < 0 )
+        return -1;
+    return (max_nodes + 7) / 8;
+}
+
+xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
+{
+    int sz;
+
+    sz = xc_get_cpumap_size(xch);
+    if (sz <= 0)
+        return NULL;
+    return calloc(1, sz);
+}
+
+/*
+ * xc_bitops.h has macros that do this as well - however they assume that
+ * the bitmask is word aligned but xc_cpumap_t is only guaranteed to be
+ * byte aligned and so we need byte versions for architectures which do
+ * not support misaligned accesses (which is basically everyone
+ * but x86, although even on x86 it can be inefficient).
+ *
+ * NOTE: The xc_bitops macros now use byte alignment.
+ * TODO: Clean up the users of this interface.
+ */
+#define BITS_PER_CPUMAP(map) (sizeof(*map) * 8)
+#define CPUMAP_ENTRY(cpu, map) ((map))[(cpu) / BITS_PER_CPUMAP(map)]
+#define CPUMAP_SHIFT(cpu, map) ((cpu) % BITS_PER_CPUMAP(map))
+void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map)
+{
+    CPUMAP_ENTRY(cpu, map) &= ~(1U << CPUMAP_SHIFT(cpu, map));
+}
+
+void xc_cpumap_setcpu(int cpu, xc_cpumap_t map)
+{
+    CPUMAP_ENTRY(cpu, map) |= (1U << CPUMAP_SHIFT(cpu, map));
+}
+
+int xc_cpumap_testcpu(int cpu, xc_cpumap_t map)
+{
+    return (CPUMAP_ENTRY(cpu, map) >> CPUMAP_SHIFT(cpu, map)) & 1;
+}
+
+xc_nodemap_t xc_nodemap_alloc(xc_interface *xch)
+{
+    int sz;
+
+    sz = xc_get_nodemap_size(xch);
+    if (sz <= 0)
+        return NULL;
+    return calloc(1, sz);
+}
+
+int xc_readconsolering(xc_interface *xch,
+                       char *buffer,
+                       unsigned int *pnr_chars,
+                       int clear, int incremental, uint32_t *pindex)
+{
+    int ret;
+    unsigned int nr_chars = *pnr_chars;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(buffer, nr_chars, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, buffer) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_readconsole;
+    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
+    sysctl.u.readconsole.count = nr_chars;
+    sysctl.u.readconsole.clear = clear;
+    sysctl.u.readconsole.incremental = 0;
+    if ( pindex )
+    {
+        sysctl.u.readconsole.index = *pindex;
+        sysctl.u.readconsole.incremental = incremental;
+    }
+
+    if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
+    {
+        *pnr_chars = sysctl.u.readconsole.count;
+        if ( pindex )
+            *pindex = sysctl.u.readconsole.index;
+    }
+
+    xc_hypercall_bounce_post(xch, buffer);
+
+    return ret;
+}
+
+int xc_send_debug_keys(xc_interface *xch, const char *keys)
+{
+    int ret, len = strlen(keys);
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE_IN(keys, len);
+
+    if ( xc_hypercall_bounce_pre(xch, keys) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_debug_keys;
+    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
+    sysctl.u.debug_keys.nr_keys = len;
+
+    ret = do_sysctl(xch, &sysctl);
+
+    xc_hypercall_bounce_post(xch, keys);
+
+    return ret;
+}
+
+int xc_physinfo(xc_interface *xch,
+                xc_physinfo_t *put_info)
+{
+    int ret;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_physinfo;
+
+    memcpy(&sysctl.u.physinfo, put_info, sizeof(*put_info));
+
+    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
+        return ret;
+
+    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));
+
+    return 0;
+}
+
+int xc_microcode_update(xc_interface *xch, const void *buf, size_t len)
+{
+    int ret;
+    DECLARE_PLATFORM_OP;
+    DECLARE_HYPERCALL_BUFFER(struct xenpf_microcode_update, uc);
+
+    uc = xc_hypercall_buffer_alloc(xch, uc, len);
+    if ( uc == NULL )
+        return -1;
+
+    memcpy(uc, buf, len);
+
+    platform_op.cmd = XENPF_microcode_update;
+    platform_op.u.microcode.length = len;
+    set_xen_guest_handle(platform_op.u.microcode.data, uc);
+
+    ret = do_platform_op(xch, &platform_op);
+
+    xc_hypercall_buffer_free(xch, uc);
+
+    return ret;
+}
+
+int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
+                   xc_cputopo_t *cputopo)
+{
+    int ret;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(cputopo, *max_cpus * sizeof(*cputopo),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( (ret = xc_hypercall_bounce_pre(xch, cputopo)) )
+        goto out;
+
+    sysctl.u.cputopoinfo.num_cpus = *max_cpus;
+    set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, cputopo);
+
+    sysctl.cmd = XEN_SYSCTL_cputopoinfo;
+
+    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
+        goto out;
+
+    *max_cpus = sysctl.u.cputopoinfo.num_cpus;
+
+out:
+    xc_hypercall_bounce_post(xch, cputopo);
+
+    return ret;
+}
+
+int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
+                xc_meminfo_t *meminfo, uint32_t *distance)
+{
+    int ret;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(meminfo, *max_nodes * sizeof(*meminfo),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(distance,
+                             *max_nodes * *max_nodes * sizeof(*distance),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( (ret = xc_hypercall_bounce_pre(xch, meminfo)) )
+        goto out;
+    if ( (ret = xc_hypercall_bounce_pre(xch, distance)) )
+        goto out;
+
+    sysctl.u.numainfo.num_nodes = *max_nodes;
+    set_xen_guest_handle(sysctl.u.numainfo.meminfo, meminfo);
+    set_xen_guest_handle(sysctl.u.numainfo.distance, distance);
+
+    sysctl.cmd = XEN_SYSCTL_numainfo;
+
+    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
+        goto out;
+
+    *max_nodes = sysctl.u.numainfo.num_nodes;
+
+out:
+    xc_hypercall_bounce_post(xch, meminfo);
+    xc_hypercall_bounce_post(xch, distance);
+
+    return ret;
+}
+
+int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
+                   physdev_pci_device_t *devs,
+                   uint32_t *nodes)
+{
+    int ret = 0;
+    unsigned processed = 0;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(devs, num_devs * sizeof(*devs),
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(nodes, num_devs * sizeof(*nodes),
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( (ret = xc_hypercall_bounce_pre(xch, devs)) )
+        goto out;
+    if ( (ret = xc_hypercall_bounce_pre(xch, nodes)) )
+        goto out;
+
+    sysctl.cmd = XEN_SYSCTL_pcitopoinfo;
+
+    while ( processed < num_devs )
+    {
+        sysctl.u.pcitopoinfo.num_devs = num_devs - processed;
+        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.devs, devs,
+                                    processed);
+        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.nodes, nodes,
+                                    processed);
+
+        if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
+            break;
+
+        processed += sysctl.u.pcitopoinfo.num_devs;
+    }
+
+ out:
+    xc_hypercall_bounce_post(xch, devs);
+    xc_hypercall_bounce_post(xch, nodes);
+
+    return ret;
+}
+
+int xc_sched_id(xc_interface *xch,
+                int *sched_id)
+{
+    int ret;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_sched_id;
+
+    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
+        return ret;
+
+    *sched_id = sysctl.u.sched_id.sched_id;
+
+    return 0;
+}
+
+#if defined(__i386__) || defined(__x86_64__)
+int xc_mca_op(xc_interface *xch, struct xen_mc *mc)
+{
+    int ret = 0;
+    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, mc) )
+    {
+        PERROR("Could not bounce xen_mc memory buffer");
+        return -1;
+    }
+    mc->interface_version = XEN_MCA_INTERFACE_VERSION;
+
+    ret = xencall1(xch->xcall, __HYPERVISOR_mca,
+                   HYPERCALL_BUFFER_AS_ARG(mc));
+
+    xc_hypercall_bounce_post(xch, mc);
+    return ret;
+}
+
+int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
+                        xc_cpumap_t cpumap, unsigned int nr_bits)
+{
+    int ret = -1;
+    struct xen_mc mc_buf, *mc = &mc_buf;
+    struct xen_mc_inject_v2 *inject = &mc->u.mc_inject_v2;
+
+    DECLARE_HYPERCALL_BOUNCE(cpumap, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    memset(mc, 0, sizeof(*mc));
+
+    if ( cpumap )
+    {
+        if ( !nr_bits )
+        {
+            errno = EINVAL;
+            goto out;
+        }
+
+        HYPERCALL_BOUNCE_SET_SIZE(cpumap, (nr_bits + 7) / 8);
+        if ( xc_hypercall_bounce_pre(xch, cpumap) )
+        {
+            PERROR("Could not bounce cpumap memory buffer");
+            goto out;
+        }
+        set_xen_guest_handle(inject->cpumap.bitmap, cpumap);
+        inject->cpumap.nr_bits = nr_bits;
+    }
+
+    inject->flags = flags;
+    mc->cmd = XEN_MC_inject_v2;
+    mc->interface_version = XEN_MCA_INTERFACE_VERSION;
+
+    if ( xc_hypercall_bounce_pre(xch, mc) )
+    {
+        PERROR("Could not bounce xen_mc memory buffer");
+        goto out_free_cpumap;
+    }
+
+    ret = xencall1(xch->xcall, __HYPERVISOR_mca, HYPERCALL_BUFFER_AS_ARG(mc));
+
+    xc_hypercall_bounce_post(xch, mc);
+out_free_cpumap:
+    if ( cpumap )
+        xc_hypercall_bounce_post(xch, cpumap);
+out:
+    return ret;
+}
+#endif /* __i386__ || __x86_64__ */
+
+int xc_perfc_reset(xc_interface *xch)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_perfc_op;
+    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_reset;
+    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
+    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);
+
+    return do_sysctl(xch, &sysctl);
+}
+
+int xc_perfc_query_number(xc_interface *xch,
+                          int *nbr_desc,
+                          int *nbr_val)
+{
+    int rc;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_perfc_op;
+    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
+    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
+    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    if ( nbr_desc )
+        *nbr_desc = sysctl.u.perfc_op.nr_counters;
+    if ( nbr_val )
+        *nbr_val = sysctl.u.perfc_op.nr_vals;
+
+    return rc;
+}
+
+int xc_perfc_query(xc_interface *xch,
+                   struct xc_hypercall_buffer *desc,
+                   struct xc_hypercall_buffer *val)
+{
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(desc);
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(val);
+
+    sysctl.cmd = XEN_SYSCTL_perfc_op;
+    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
+    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
+    set_xen_guest_handle(sysctl.u.perfc_op.val, val);
+
+    return do_sysctl(xch, &sysctl);
+}
+
+int xc_lockprof_reset(xc_interface *xch)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_lockprof_op;
+    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_reset;
+    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);
+
+    return do_sysctl(xch, &sysctl);
+}
+
+int xc_lockprof_query_number(xc_interface *xch,
+                             uint32_t *n_elems)
+{
+    int rc;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_lockprof_op;
+    sysctl.u.lockprof_op.max_elem = 0;
+    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
+    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    *n_elems = sysctl.u.lockprof_op.nr_elem;
+
+    return rc;
+}
+
+int xc_lockprof_query(xc_interface *xch,
+                      uint32_t *n_elems,
+                      uint64_t *time,
+                      struct xc_hypercall_buffer *data)
+{
+    int rc;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(data);
+
+    sysctl.cmd = XEN_SYSCTL_lockprof_op;
+    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
+    sysctl.u.lockprof_op.max_elem = *n_elems;
+    set_xen_guest_handle(sysctl.u.lockprof_op.data, data);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    *n_elems = sysctl.u.lockprof_op.nr_elem;
+
+    return rc;
+}
+
+int xc_getcpuinfo(xc_interface *xch, int max_cpus,
+                  xc_cpuinfo_t *info, int *nr_cpus)
+{
+    int rc;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(info, max_cpus*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, info) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
+    sysctl.u.getcpuinfo.max_cpus = max_cpus;
+    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    xc_hypercall_bounce_post(xch, info);
+
+    if ( nr_cpus )
+        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;
+
+    return rc;
+}
+
+int xc_livepatch_upload(xc_interface *xch,
+                        char *name,
+                        unsigned char *payload,
+                        uint32_t size)
+{
+    int rc;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER(char, local);
+    DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    struct xen_livepatch_name def_name = { };
+
+    if ( !name || !payload )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    def_name.size = strlen(name) + 1;
+    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);
+
+    if ( xc_hypercall_bounce_pre(xch, name) )
+        return -1;
+
+    local = xc_hypercall_buffer_alloc(xch, local, size);
+    if ( !local )
+    {
+        xc_hypercall_bounce_post(xch, name);
+        return -1;
+    }
+    memcpy(local, payload, size);
+
+    sysctl.cmd = XEN_SYSCTL_livepatch_op;
+    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_UPLOAD;
+    sysctl.u.livepatch.pad = 0;
+    sysctl.u.livepatch.u.upload.size = size;
+    set_xen_guest_handle(sysctl.u.livepatch.u.upload.payload, local);
+
+    sysctl.u.livepatch.u.upload.name = def_name;
+    set_xen_guest_handle(sysctl.u.livepatch.u.upload.name.name, name);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    xc_hypercall_buffer_free(xch, local);
+    xc_hypercall_bounce_post(xch, name);
+
+    return rc;
+}
+
+int xc_livepatch_get(xc_interface *xch,
+                     char *name,
+                     struct xen_livepatch_status *status)
+{
+    int rc;
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(name, 0 /*adjust later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    struct xen_livepatch_name def_name = { };
+
+    if ( !name )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    def_name.size = strlen(name) + 1;
+    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);
+
+    if ( xc_hypercall_bounce_pre(xch, name) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_livepatch_op;
+    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_GET;
+    sysctl.u.livepatch.pad = 0;
+
+    sysctl.u.livepatch.u.get.status.state = 0;
+    sysctl.u.livepatch.u.get.status.rc = 0;
+
+    sysctl.u.livepatch.u.get.name = def_name;
+    set_xen_guest_handle(sysctl.u.livepatch.u.get.name.name, name);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    xc_hypercall_bounce_post(xch, name);
+
+    memcpy(status, &sysctl.u.livepatch.u.get.status, sizeof(*status));
+
+    return rc;
+}
+
+/*
+ * Get a number of available payloads and get actual total size of
+ * the payloads' name and metadata arrays.
+ *
+ * This function is typically executed first, before xc_livepatch_list(),
+ * to obtain the sizes and correctly allocate all necessary data resources.
+ *
+ * The return value is zero if the hypercall completed successfully.
+ *
+ * If there was an error performing the sysctl operation, the return value
+ * will contain the hypercall error code value.
+ */
+int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
+                                uint32_t *name_total_size,
+                                uint32_t *metadata_total_size)
+{
+    DECLARE_SYSCTL;
+    int rc;
+
+    if ( !nr || !name_total_size || !metadata_total_size )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    memset(&sysctl, 0, sizeof(sysctl));
+    sysctl.cmd = XEN_SYSCTL_livepatch_op;
+    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;
+
+    rc = do_sysctl(xch, &sysctl);
+    if ( rc )
+        return rc;
+
+    *nr = sysctl.u.livepatch.u.list.nr;
+    *name_total_size = sysctl.u.livepatch.u.list.name_total_size;
+    *metadata_total_size = sysctl.u.livepatch.u.list.metadata_total_size;
+
+    return 0;
+}
+
+/*
+ * The heart of this function is to get an array of the following objects:
+ *   - xen_livepatch_status_t: states and return codes of payloads
+ *   - name: names of payloads
+ *   - len: lengths of corresponding payloads' names
+ *   - metadata: payloads' metadata
+ *   - metadata_len: lengths of corresponding payloads' metadata
+ *
+ * However it is complex because it has to deal with the hypervisor
+ * returning some of the requested data or data being stale
+ * (another hypercall might alter the list).
+ *
+ * The parameters that the function expects to contain data from
+ * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
+ * 'left' are also updated with the number of entries filled out
+ * and respectively the number of entries left to get from hypervisor.
+ *
+ * It is expected that the caller of this function will first issue the
+ * xc_livepatch_list_get_sizes() in order to obtain total sizes of names
+ * and all metadata as well as the current number of payload entries.
+ * The total sizes are required and supplied via the 'name_total_size' and
+ * 'metadata_total_size' parameters.
+ *
+ * The 'max' is to be provided by the caller with the maximum number of
+ * entries that 'info', 'name', 'len', 'metadata' and 'metadata_len' arrays
+ * can be filled up with.
+ *
+ * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
+ * structure size.
+ *
+ * Each entry in the 'name' array may have an arbitrary size.
+ *
+ * Each entry in the 'len' array is expected to be of uint32_t size.
+ *
+ * Each entry in the 'metadata' array may have an arbitrary size.
+ *
+ * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
+ *
+ * The return value is zero if the hypercall completed successfully.
+ * Note that the return value is _not_ the amount of entries filled
+ * out - that is saved in 'done'.
+ *
+ * If there was an error performing the operation, the return value
+ * will contain a negative -EXX type value. The 'done' and 'left'
+ * will contain the number of entries that had been successfully
+ * retrieved (if any).
+ */
+int xc_livepatch_list(xc_interface *xch, const unsigned int max,
+                      const unsigned int start,
+                      struct xen_livepatch_status *info,
+                      char *name, uint32_t *len,
+                      const uint32_t name_total_size,
+                      char *metadata, uint32_t *metadata_len,
+                      const uint32_t metadata_total_size,
+                      unsigned int *done, unsigned int *left)
+{
+    int rc;
+    DECLARE_SYSCTL;
+    /* The sizes are adjusted later - hence zero. */
+    DECLARE_HYPERCALL_BOUNCE(info, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(metadata, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(metadata_len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    uint32_t max_batch_sz, nr;
+    uint32_t version = 0, retries = 0;
+    uint32_t adjust = 0;
+    uint32_t name_off = 0, metadata_off = 0;
+    uint32_t name_sz, metadata_sz;
+
+    if ( !max || !info || !name || !len ||
+         !metadata || !metadata_len || !done || !left )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    if ( name_total_size == 0 )
+    {
+        errno = ENOENT;
+        return -1;
+    }
+
+    memset(&sysctl, 0, sizeof(sysctl));
+    sysctl.cmd = XEN_SYSCTL_livepatch_op;
+    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;
+    sysctl.u.livepatch.u.list.idx = start;
+
+    max_batch_sz = max;
+    name_sz = name_total_size;
+    metadata_sz = metadata_total_size;
+    *done = 0;
+    *left = 0;
+    do {
+        uint32_t _name_sz, _metadata_sz;
+
+        /*
+         * The first time we go in this loop our 'max' may be bigger
+         * than what the hypervisor is comfortable with - hence the first
+         * couple of loops may adjust the number of entries we will
+         * want filled (tracked by 'nr').
+         *
+         * N.B. This is a do { } while loop and the right hand side of
+         * the conditional when adjusting will evaluate to false (as
+         * *left is set to zero before the loop). Hence we need this
+         * adjust - even if we reset it at the start of the loop.
+         */
+        if ( adjust )
+            adjust = 0; /* Used when adjusting the 'max_batch_sz' or 'retries'. */
+
+        nr = min(max - *done, max_batch_sz);
+
+        sysctl.u.livepatch.u.list.nr = nr;
+        /* Fix the size (may vary between hypercalls). */
+        HYPERCALL_BOUNCE_SET_SIZE(info, nr * sizeof(*info));
+        HYPERCALL_BOUNCE_SET_SIZE(name, name_sz);
+        HYPERCALL_BOUNCE_SET_SIZE(len, nr * sizeof(*len));
+        HYPERCALL_BOUNCE_SET_SIZE(metadata, metadata_sz);
+        HYPERCALL_BOUNCE_SET_SIZE(metadata_len, nr * sizeof(*metadata_len));
+        /* Move the pointer to proper offset into 'info'. */
+        (HYPERCALL_BUFFER(info))->ubuf = info + *done;
+        (HYPERCALL_BUFFER(name))->ubuf = name + name_off;
+        (HYPERCALL_BUFFER(len))->ubuf = len + *done;
+        (HYPERCALL_BUFFER(metadata))->ubuf = metadata + metadata_off;
+        (HYPERCALL_BUFFER(metadata_len))->ubuf = metadata_len + *done;
+        /* Allocate memory. */
+        rc = xc_hypercall_bounce_pre(xch, info);
+        if ( rc )
+            break;
+
+        rc = xc_hypercall_bounce_pre(xch, name);
+        if ( rc )
+            break;
+
+        rc = xc_hypercall_bounce_pre(xch, len);
+        if ( rc )
+            break;
+
+        rc = xc_hypercall_bounce_pre(xch, metadata);
+        if ( rc )
+            break;
+
+        rc = xc_hypercall_bounce_pre(xch, metadata_len);
+        if ( rc )
+            break;
+
+        set_xen_guest_handle(sysctl.u.livepatch.u.list.status, info);
+        set_xen_guest_handle(sysctl.u.livepatch.u.list.name, name);
+        set_xen_guest_handle(sysctl.u.livepatch.u.list.len, len);
+        set_xen_guest_handle(sysctl.u.livepatch.u.list.metadata, metadata);
+        set_xen_guest_handle(sysctl.u.livepatch.u.list.metadata_len, metadata_len);
+
+        rc = do_sysctl(xch, &sysctl);
+        /*
+         * From here on we MUST call xc_hypercall_bounce. If rc < 0 we
+         * end up doing it (outside the loop), so using a break is OK.
+         */
+        if ( rc < 0 && errno == E2BIG )
+        {
+            if ( max_batch_sz <= 1 )
+                break;
+            max_batch_sz >>= 1;
+            adjust = 1; /* For the loop conditional to let us loop again. */
+            /* No memory leaks! */
+            xc_hypercall_bounce_post(xch, info);
+            xc_hypercall_bounce_post(xch, name);
+            xc_hypercall_bounce_post(xch, len);
+            xc_hypercall_bounce_post(xch, metadata);
+            xc_hypercall_bounce_post(xch, metadata_len);
+            continue;
+        }
+
+        if ( rc < 0 ) /* For all other errors we bail out. */
+            break;
+
+        if ( !version )
+            version = sysctl.u.livepatch.u.list.version;
+
+        if ( sysctl.u.livepatch.u.list.version != version )
+        {
+            /* We could make this configurable as a parameter. */
+            if ( retries++ > 3 )
+            {
+                rc = -1;
+                errno = EBUSY;
+                break;
+            }
+            *done = 0; /* Retry from scratch. */
+            version = sysctl.u.livepatch.u.list.version;
+            adjust = 1; /* And make sure we continue in the loop. */
+            /* No memory leaks. */
+            xc_hypercall_bounce_post(xch, info);
+            xc_hypercall_bounce_post(xch, name);
+            xc_hypercall_bounce_post(xch, len);
+            xc_hypercall_bounce_post(xch, metadata);
+            xc_hypercall_bounce_post(xch, metadata_len);
+            continue;
+        }
+
+        /* We should never hit this, but just in case. */
+        if ( rc > nr )
+        {
+            errno = EOVERFLOW; /* Overflow! */
+            rc = -1;
+            break;
+        }
+        *left = sysctl.u.livepatch.u.list.nr; /* Total remaining count. */
+        _name_sz = sysctl.u.livepatch.u.list.name_total_size; /* Total received name size. */
+        _metadata_sz = sysctl.u.livepatch.u.list.metadata_total_size; /* Total received metadata size. */
+        /* Copy only up to 'rc' entries of data - we could use min(rc, nr) if desired. */
+        HYPERCALL_BOUNCE_SET_SIZE(info, (rc * sizeof(*info)));
+        HYPERCALL_BOUNCE_SET_SIZE(name, _name_sz);
+        HYPERCALL_BOUNCE_SET_SIZE(len, (rc * sizeof(*len)));
+        HYPERCALL_BOUNCE_SET_SIZE(metadata, _metadata_sz);
+        HYPERCALL_BOUNCE_SET_SIZE(metadata_len, (rc * sizeof(*metadata_len)));
+        /* Bounce the data and free the bounce buffer. */
+        xc_hypercall_bounce_post(xch, info);
+        xc_hypercall_bounce_post(xch, name);
+        xc_hypercall_bounce_post(xch, len);
+        xc_hypercall_bounce_post(xch, metadata);
+        xc_hypercall_bounce_post(xch, metadata_len);
+
+        name_sz -= _name_sz;
+        name_off += _name_sz;
+        metadata_sz -= _metadata_sz;
+        metadata_off += _metadata_sz;
+
+        /* And update how many elements of info we have copied into. */
+        *done += rc;
+        /* Update idx. */
+        sysctl.u.livepatch.u.list.idx = *done;
+    } while ( adjust || (*done < max && *left != 0) );
+
+    if ( rc < 0 )
+    {
+        xc_hypercall_bounce_post(xch, len);
+        xc_hypercall_bounce_post(xch, name);
+        xc_hypercall_bounce_post(xch, info);
+        xc_hypercall_bounce_post(xch, metadata);
+        xc_hypercall_bounce_post(xch, metadata_len);
+    }
+
+    return rc > 0 ? 0 : rc;
+}
+
+static int _xc_livepatch_action(xc_interface *xch,
+                                char *name,
+                                unsigned int action,
+                                uint32_t timeout,
+                                uint32_t flags)
+{
+    int rc;
+    DECLARE_SYSCTL;
+    /* The size is figured out when we strlen(name) */
+    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    struct xen_livepatch_name def_name = { };
+
+    def_name.size = strlen(name) + 1;
+
+    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);
+
+    if ( xc_hypercall_bounce_pre(xch, name) )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_livepatch_op;
+    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_ACTION;
+    sysctl.u.livepatch.pad = 0;
+    sysctl.u.livepatch.u.action.cmd = action;
+    sysctl.u.livepatch.u.action.timeout = timeout;
+    sysctl.u.livepatch.u.action.flags = flags;
+    sysctl.u.livepatch.u.action.pad = 0;
+
+    sysctl.u.livepatch.u.action.name = def_name;
+    set_xen_guest_handle(sysctl.u.livepatch.u.action.name.name, name);
+
+    rc = do_sysctl(xch, &sysctl);
+
+    xc_hypercall_bounce_post(xch, name);
+
+    return rc;
+}
+
+int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
+{
+    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_APPLY, timeout, flags);
+}
+
+int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
+{
+    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REVERT, timeout, flags);
+}
+
+int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
+{
+    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_UNLOAD, timeout, flags);
+}
+
+int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
+{
+    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REPLACE, timeout, flags);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
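
To make the two-step listing protocol described in the comments above concrete, here is a sketch (not part of this commit) that sizes the buffers with xc_livepatch_list_get_sizes() and then fetches everything in one xc_livepatch_list() call; allocation failures and partial results are not handled:

    #include <xenctrl.h>
    #include <stdlib.h>

    static int list_livepatches(xc_interface *xch)
    {
        unsigned int nr, done = 0, left = 0;
        uint32_t name_total, meta_total;
        struct xen_livepatch_status *info;
        char *names, *meta;
        uint32_t *name_lens, *meta_lens;
        int rc;

        /* Step 1: number of payloads and total sizes of the string arrays. */
        if ( xc_livepatch_list_get_sizes(xch, &nr, &name_total, &meta_total) )
            return -1;
        if ( !nr )
            return 0;

        info = calloc(nr, sizeof(*info));
        names = calloc(1, name_total);
        name_lens = calloc(nr, sizeof(*name_lens));
        meta = calloc(1, meta_total);
        meta_lens = calloc(nr, sizeof(*meta_lens));

        /* Step 2: fetch all entries starting at index 0. */
        rc = xc_livepatch_list(xch, nr, 0, info, names, name_lens, name_total,
                               meta, meta_lens, meta_total, &done, &left);

        /* On success, 'done' entries of info/name_lens/meta_lens are valid. */

        free(meta_lens);
        free(meta);
        free(name_lens);
        free(names);
        free(info);

        return rc ? rc : (int)done;
    }
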
diff --git a/tools/libs/ctrl/xc_monitor.c b/tools/libs/ctrl/xc_monitor.c
new file mode 100644 (file)
index 0000000..4ac823e
--- /dev/null
@@ -0,0 +1,257 @@
+/******************************************************************************
+ *
+ * xc_monitor.c
+ *
+ * Interface to VM event monitor
+ *
+ * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port)
+{
+    return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
+                              port);
+}
+
+int xc_monitor_disable(xc_interface *xch, uint32_t domain_id)
+{
+    return xc_vm_event_control(xch, domain_id,
+                               XEN_VM_EVENT_DISABLE,
+                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
+                               NULL);
+}
+
+int xc_monitor_resume(xc_interface *xch, uint32_t domain_id)
+{
+    return xc_vm_event_control(xch, domain_id,
+                               XEN_VM_EVENT_RESUME,
+                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
+                               NULL);
+}
+
+int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
+                                uint32_t *capabilities)
+{
+    int rc;
+    DECLARE_DOMCTL;
+
+    if ( !capabilities )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES;
+
+    rc = do_domctl(xch, &domctl);
+    if ( rc )
+        return rc;
+
+    *capabilities = domctl.u.monitor_op.event;
+    return 0;
+}
+
+int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
+                             uint16_t index, bool enable, bool sync,
+                             uint64_t bitmask, bool onchangeonly)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG;
+    domctl.u.monitor_op.u.mov_to_cr.index = index;
+    domctl.u.monitor_op.u.mov_to_cr.sync = sync;
+    domctl.u.monitor_op.u.mov_to_cr.onchangeonly = onchangeonly;
+    domctl.u.monitor_op.u.mov_to_cr.bitmask = bitmask;
+    domctl.u.monitor_op.u.mov_to_cr.pad1 = 0;
+    domctl.u.monitor_op.u.mov_to_cr.pad2 = 0;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
+                          bool enable, bool onchangeonly)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR;
+    domctl.u.monitor_op.u.mov_to_msr.msr = msr;
+    domctl.u.monitor_op.u.mov_to_msr.onchangeonly = onchangeonly;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
+                                   bool enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id,
+                          bool enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
+                                 bool enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id, bool enable,
+                             bool sync, bool allow_userspace)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST;
+    domctl.u.monitor_op.u.guest_request.sync = sync;
+    domctl.u.monitor_op.u.guest_request.allow_userspace = enable ? allow_userspace : false;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_inguest_pagefault(xc_interface *xch, uint32_t domain_id,
+                                bool disable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = disable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
+                                bool enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP;
+    domctl.u.monitor_op.event = enable;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
+                                bool enable, bool sync)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION;
+    domctl.u.monitor_op.u.debug_exception.sync = sync;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_CPUID;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
+                               bool enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
+                                  bool enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED;
+
+    return do_domctl(xch, &domctl);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
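
The monitor helpers above all drive the same XEN_DOMCTL_monitor_op pattern. As a usage sketch (not part of this change; the helper name and includes are illustrative), a vm_event consumer might enable synchronous guest-request and CPUID monitoring for an existing domain like this:

    #include <stdbool.h>
    #include <xenctrl.h>

    /* Illustrative helper: turn on two monitor events for one domain. */
    static int enable_monitoring(xc_interface *xch, uint32_t domid)
    {
        /* Trap guest-initiated vm_event requests, delivered synchronously,
         * and also allow requests originating from guest userspace. */
        if ( xc_monitor_guest_request(xch, domid, true, true, true) )
            return -1;

        /* Additionally trap CPUID executions. */
        return xc_monitor_cpuid(xch, domid, true);
    }
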
diff --git a/tools/libs/ctrl/xc_msr_x86.h b/tools/libs/ctrl/xc_msr_x86.h
new file mode 100644 (file)
index 0000000..7f100e7
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * xc_msr_x86.h
+ *
+ * MSR definition macros
+ *
+ * Copyright (C) 2014      Intel Corporation
+ * Author Dongxiao Xu <dongxiao.xu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only, with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#ifndef XC_MSR_X86_H
+#define XC_MSR_X86_H
+
+#define MSR_IA32_TSC            0x00000010
+#define MSR_IA32_CMT_EVTSEL     0x00000c8d
+#define MSR_IA32_CMT_CTR        0x00000c8e
+
+#endif
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_netbsd.c b/tools/libs/ctrl/xc_netbsd.c
new file mode 100644 (file)
index 0000000..3197993
--- /dev/null
@@ -0,0 +1,74 @@
+/******************************************************************************
+ *
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <malloc.h>
+
+/* Optionally flush file to disk and discard page cache */
+void discard_file_cache(xc_interface *xch, int fd, int flush) 
+{
+    off_t cur = 0;
+    int saved_errno = errno;
+
+    if ( flush && (fsync(fd) < 0) )
+    {
+        /*PERROR("Failed to flush file: %s", strerror(errno));*/
+        goto out;
+    }
+
+    /*
+     * Calculate the last page boundary of the amount written so far,
+     * unless we are flushing, in which case the entire cache is
+     * discarded.
+     */
+    if ( !flush )
+    {
+        if ( ( cur = lseek(fd, 0, SEEK_CUR)) == (off_t)-1 )
+            cur = 0;
+        cur &= ~(PAGE_SIZE - 1);
+    }
+
+    /* Discard from the buffer cache. */
+    if ( posix_fadvise(fd, 0, cur, POSIX_FADV_DONTNEED) < 0 )
+    {
+        /*PERROR("Failed to discard cache: %s", strerror(errno));*/
+        goto out;
+    }
+
+ out:
+    errno = saved_errno;
+}
+
+void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
+{
+    return valloc(size);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
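
discard_file_cache() exists to keep large sequential writes (e.g. a save image) from ballooning the dom0 page cache. A minimal sketch of the intended calling pattern (write_chunk() is an illustrative name, not a real function; write_exact() appears later in this patch):

    /* Illustrative: stream one chunk to fd, then drop the already-written
     * pages from the page cache.  Passing flush=1 on the final call also
     * fsync()s the file and discards the whole cache. */
    static int write_chunk(xc_interface *xch, int fd, const void *buf, size_t len)
    {
        if ( write_exact(fd, buf, len) )
            return -1;

        discard_file_cache(xch, fd, 0);
        return 0;
    }
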
diff --git a/tools/libs/ctrl/xc_pagetab.c b/tools/libs/ctrl/xc_pagetab.c
new file mode 100644 (file)
index 0000000..db25c20
--- /dev/null
@@ -0,0 +1,113 @@
+/******************************************************************************
+ * xc_pagetab.c
+ *
+ * Function to translate virtual to physical addresses.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+#include <xen/hvm/save.h>
+
+#define CR0_PG  0x80000000
+#define CR4_PAE 0x20
+#define PTE_PSE 0x80
+#define EFER_LMA 0x400
+
+
+unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
+                                           int vcpu, unsigned long long virt)
+{
+    xc_dominfo_t dominfo;
+    uint64_t paddr, mask, pte = 0;
+    int size, level, pt_levels = 2;
+    void *map;
+
+    if (xc_domain_getinfo(xch, dom, 1, &dominfo) != 1 
+        || dominfo.domid != dom)
+        return 0;
+
+    /* What kind of paging are we dealing with? */
+    if (dominfo.hvm) {
+        struct hvm_hw_cpu ctx;
+        if (xc_domain_hvm_getcontext_partial(xch, dom,
+                                             HVM_SAVE_CODE(CPU), vcpu,
+                                             &ctx, sizeof ctx) != 0)
+            return 0;
+        if (!(ctx.cr0 & CR0_PG))
+            return virt >> PAGE_SHIFT;
+        pt_levels = (ctx.msr_efer&EFER_LMA) ? 4 : (ctx.cr4&CR4_PAE) ? 3 : 2;
+        paddr = ctx.cr3 & ((pt_levels == 3) ? ~0x1full : ~0xfffull);
+    } else {
+        unsigned int gwidth;
+        vcpu_guest_context_any_t ctx;
+        if (xc_vcpu_getcontext(xch, dom, vcpu, &ctx) != 0)
+            return 0;
+        if (xc_domain_get_guest_width(xch, dom, &gwidth) != 0)
+            return 0;
+        if (gwidth == 8) {
+            pt_levels = 4;
+            paddr = (uint64_t)xen_cr3_to_pfn_x86_64(ctx.x64.ctrlreg[3])
+                << PAGE_SHIFT;
+        } else {
+            pt_levels = 3;
+            paddr = (uint64_t)xen_cr3_to_pfn_x86_32(ctx.x32.ctrlreg[3])
+                << PAGE_SHIFT;
+        }
+    }
+
+    if (pt_levels == 4) {
+        virt &= 0x0000ffffffffffffull;
+        mask =  0x0000ff8000000000ull;
+    } else if (pt_levels == 3) {
+        virt &= 0x00000000ffffffffull;
+        mask =  0x0000007fc0000000ull;
+    } else {
+        virt &= 0x00000000ffffffffull;
+        mask =  0x00000000ffc00000ull;
+    }
+    size = (pt_levels == 2 ? 4 : 8);
+
+    /* Walk the pagetables */
+    for (level = pt_levels; level > 0; level--) {
+        paddr += ((virt & mask) >> (xc_ffs64(mask) - 1)) * size;
+        map = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_READ, 
+                                   paddr >>PAGE_SHIFT);
+        if (!map) 
+            return 0;
+        memcpy(&pte, map + (paddr & (PAGE_SIZE - 1)), size);
+        munmap(map, PAGE_SIZE);
+        if (!(pte & 1)) {
+            errno = EADDRNOTAVAIL;
+            return 0;
+        }
+        paddr = pte & 0x000ffffffffff000ull;
+        if ((level == 2 || (level == 3 && pt_levels == 4)) && (pte & PTE_PSE)) {
+            mask = ((mask ^ ~-mask) >> 1); /* All bits below first set bit */
+            return ((paddr & ~mask) | (virt & mask)) >> PAGE_SHIFT;
+        }
+        mask >>= (pt_levels == 2 ? 10 : 9);
+    }
+    return paddr >> PAGE_SHIFT;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
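
The walk above yields a guest frame number, which callers typically feed straight back into a foreign mapping. A hedged usage sketch (peek_guest_byte() is illustrative, not part of the patch) reading one byte at a guest-virtual address on vCPU 0:

    #define XC_WANT_COMPAT_MAP_FOREIGN_API  /* for xc_map_foreign_range() */
    #include <xenctrl.h>
    #include <sys/mman.h>

    static int peek_guest_byte(xc_interface *xch, uint32_t domid,
                               unsigned long long va, uint8_t *out)
    {
        unsigned long gfn = xc_translate_foreign_address(xch, domid, 0, va);
        void *page;

        if ( !gfn )          /* 0 signals a failed translation */
            return -1;

        page = xc_map_foreign_range(xch, domid, XC_PAGE_SIZE, PROT_READ, gfn);
        if ( !page )
            return -1;

        *out = *((uint8_t *)page + (va & (XC_PAGE_SIZE - 1)));
        munmap(page, XC_PAGE_SIZE);
        return 0;
    }
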
diff --git a/tools/libs/ctrl/xc_physdev.c b/tools/libs/ctrl/xc_physdev.c
new file mode 100644 (file)
index 0000000..460a8e7
--- /dev/null
@@ -0,0 +1,113 @@
+/******************************************************************************
+ * xc_physdev.c
+ *
+ * API for manipulating physical-device access permissions.
+ *
+ * Copyright (c) 2004, Rolf Neugebauer (Intel Research Cambridge)
+ * Copyright (c) 2004, K A Fraser (University of Cambridge)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+int xc_physdev_pci_access_modify(xc_interface *xch,
+                                 uint32_t domid,
+                                 int bus,
+                                 int dev,
+                                 int func,
+                                 int enable)
+{
+    errno = ENOSYS;
+    return -1;
+}
+
+int xc_physdev_map_pirq(xc_interface *xch,
+                        uint32_t domid,
+                        int index,
+                        int *pirq)
+{
+    int rc;
+    struct physdev_map_pirq map;
+
+    if ( !pirq )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    memset(&map, 0, sizeof(struct physdev_map_pirq));
+    map.domid = domid;
+    map.type = MAP_PIRQ_TYPE_GSI;
+    map.index = index;
+    map.pirq = *pirq < 0 ? index : *pirq;
+
+    rc = do_physdev_op(xch, PHYSDEVOP_map_pirq, &map, sizeof(map));
+
+    if ( !rc )
+        *pirq = map.pirq;
+
+    return rc;
+}
+
+int xc_physdev_map_pirq_msi(xc_interface *xch,
+                            uint32_t domid,
+                            int index,
+                            int *pirq,
+                            int devfn,
+                            int bus,
+                            int entry_nr,
+                            uint64_t table_base)
+{
+    int rc;
+    struct physdev_map_pirq map;
+
+    if ( !pirq )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    memset(&map, 0, sizeof(struct physdev_map_pirq));
+    map.domid = domid;
+    map.type = MAP_PIRQ_TYPE_MSI;
+    map.index = index;
+    map.pirq = *pirq;
+    map.bus = bus;
+    map.devfn = devfn;
+    map.entry_nr = entry_nr;
+    map.table_base = table_base;
+
+    rc = do_physdev_op(xch, PHYSDEVOP_map_pirq, &map, sizeof(map));
+
+    if ( !rc )
+        *pirq = map.pirq;
+
+    return rc;
+}
+
+int xc_physdev_unmap_pirq(xc_interface *xch,
+                          uint32_t domid,
+                          int pirq)
+{
+    int rc;
+    struct physdev_unmap_pirq unmap;
+
+    memset(&unmap, 0, sizeof(struct physdev_unmap_pirq));
+    unmap.domid = domid;
+    unmap.pirq = pirq;
+
+    rc = do_physdev_op(xch, PHYSDEVOP_unmap_pirq, &unmap, sizeof(unmap));
+
+    return rc;
+}
+
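
For GSI interrupts the index is the GSI number, and a negative *pirq asks for the pirq to equal that GSI; the value actually granted is written back on success. A short usage sketch (map_gsi() is an illustrative name only):

    static int map_gsi(xc_interface *xch, uint32_t domid, int gsi)
    {
        int pirq = -1;   /* negative: request pirq == gsi */
        int rc = xc_physdev_map_pirq(xch, domid, gsi, &pirq);

        if ( rc )
            return rc;

        /* ... hand the granted pirq to the guest configuration ... */

        return xc_physdev_unmap_pirq(xch, domid, pirq);
    }
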
diff --git a/tools/libs/ctrl/xc_pm.c b/tools/libs/ctrl/xc_pm.c
new file mode 100644 (file)
index 0000000..76d7eb7
--- /dev/null
@@ -0,0 +1,455 @@
+/******************************************************************************
+ * xc_pm.c - Libxc API for Xen Power Management (Px/Cx/Tx, etc.) statistics
+ *
+ * Copyright (c) 2008, Liu Jinsong <jinsong.liu@intel.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <stdbool.h>
+#include "xc_private.h"
+
+#include <xen-tools/libs.h>
+
+/*
+ * Get PM statistics
+ */
+int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px)
+{
+    DECLARE_SYSCTL;
+    int ret;
+
+    sysctl.cmd = XEN_SYSCTL_get_pmstat;
+    sysctl.u.get_pmstat.type = PMSTAT_get_max_px;
+    sysctl.u.get_pmstat.cpuid = cpuid;
+    ret = xc_sysctl(xch, &sysctl);
+    if ( ret )
+        return ret;
+
+    *max_px = sysctl.u.get_pmstat.u.getpx.total;
+    return ret;
+}
+
+int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt)
+{
+    DECLARE_SYSCTL;
+    /* Sizes unknown until xc_pm_get_max_px */
+    DECLARE_NAMED_HYPERCALL_BOUNCE(trans, pxpt->trans_pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(pt, pxpt->pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    int max_px, ret;
+
+    if ( !pxpt->trans_pt || !pxpt->pt )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0)
+        return ret;
+
+    HYPERCALL_BOUNCE_SET_SIZE(trans, max_px * max_px * sizeof(uint64_t));
+    HYPERCALL_BOUNCE_SET_SIZE(pt, max_px * sizeof(struct xc_px_val));
+
+    if ( xc_hypercall_bounce_pre(xch, trans) )
+        return ret;
+
+    if ( xc_hypercall_bounce_pre(xch, pt) )
+    {
+        xc_hypercall_bounce_post(xch, trans);
+        return ret;
+    }
+
+    sysctl.cmd = XEN_SYSCTL_get_pmstat;
+    sysctl.u.get_pmstat.type = PMSTAT_get_pxstat;
+    sysctl.u.get_pmstat.cpuid = cpuid;
+    sysctl.u.get_pmstat.u.getpx.total = max_px;
+    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.trans_pt, trans);
+    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.pt, pt);
+
+    ret = xc_sysctl(xch, &sysctl);
+    if ( ret )
+    {
+        xc_hypercall_bounce_post(xch, trans);
+        xc_hypercall_bounce_post(xch, pt);
+        return ret;
+    }
+
+    pxpt->total = sysctl.u.get_pmstat.u.getpx.total;
+    pxpt->usable = sysctl.u.get_pmstat.u.getpx.usable;
+    pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
+    pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;
+
+    xc_hypercall_bounce_post(xch, trans);
+    xc_hypercall_bounce_post(xch, pt);
+
+    return ret;
+}
+
+int xc_pm_reset_pxstat(xc_interface *xch, int cpuid)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_get_pmstat;
+    sysctl.u.get_pmstat.type = PMSTAT_reset_pxstat;
+    sysctl.u.get_pmstat.cpuid = cpuid;
+
+    return xc_sysctl(xch, &sysctl);
+}
+
+int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx)
+{
+    DECLARE_SYSCTL;
+    int ret = 0;
+
+    sysctl.cmd = XEN_SYSCTL_get_pmstat;
+    sysctl.u.get_pmstat.type = PMSTAT_get_max_cx;
+    sysctl.u.get_pmstat.cpuid = cpuid;
+    if ( (ret = xc_sysctl(xch, &sysctl)) != 0 )
+        return ret;
+
+    *max_cx = sysctl.u.get_pmstat.u.getcx.nr;
+    return ret;
+}
+
+int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt)
+{
+    DECLARE_SYSCTL;
+    DECLARE_NAMED_HYPERCALL_BOUNCE(triggers, cxpt->triggers,
+                                   cxpt->nr * sizeof(*cxpt->triggers),
+                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(residencies, cxpt->residencies,
+                                   cxpt->nr * sizeof(*cxpt->residencies),
+                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(pc, cxpt->pc,
+                                   cxpt->nr_pc * sizeof(*cxpt->pc),
+                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(cc, cxpt->cc,
+                                   cxpt->nr_cc * sizeof(*cxpt->cc),
+                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    int ret = -1;
+
+    if ( xc_hypercall_bounce_pre(xch, triggers) )
+        goto unlock_0;
+    if ( xc_hypercall_bounce_pre(xch, residencies) )
+        goto unlock_1;
+    if ( xc_hypercall_bounce_pre(xch, pc) )
+        goto unlock_2;
+    if ( xc_hypercall_bounce_pre(xch, cc) )
+        goto unlock_3;
+
+    sysctl.cmd = XEN_SYSCTL_get_pmstat;
+    sysctl.u.get_pmstat.type = PMSTAT_get_cxstat;
+    sysctl.u.get_pmstat.cpuid = cpuid;
+    sysctl.u.get_pmstat.u.getcx.nr = cxpt->nr;
+    sysctl.u.get_pmstat.u.getcx.nr_pc = cxpt->nr_pc;
+    sysctl.u.get_pmstat.u.getcx.nr_cc = cxpt->nr_cc;
+    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.triggers, triggers);
+    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.residencies, residencies);
+    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.pc, pc);
+    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.cc, cc);
+
+    if ( (ret = xc_sysctl(xch, &sysctl)) )
+        goto unlock_4;
+
+    cxpt->nr = sysctl.u.get_pmstat.u.getcx.nr;
+    cxpt->last = sysctl.u.get_pmstat.u.getcx.last;
+    cxpt->idle_time = sysctl.u.get_pmstat.u.getcx.idle_time;
+    cxpt->nr_pc = sysctl.u.get_pmstat.u.getcx.nr_pc;
+    cxpt->nr_cc = sysctl.u.get_pmstat.u.getcx.nr_cc;
+
+unlock_4:
+    xc_hypercall_bounce_post(xch, cc);
+unlock_3:
+    xc_hypercall_bounce_post(xch, pc);
+unlock_2:
+    xc_hypercall_bounce_post(xch, residencies);
+unlock_1:
+    xc_hypercall_bounce_post(xch, triggers);
+unlock_0:
+    return ret;
+}
+
+int xc_pm_reset_cxstat(xc_interface *xch, int cpuid)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_get_pmstat;
+    sysctl.u.get_pmstat.type = PMSTAT_reset_cxstat;
+    sysctl.u.get_pmstat.cpuid = cpuid;
+
+    return xc_sysctl(xch, &sysctl);
+}
+
+
+/*
+ * 1. Get PM parameter
+ * 2. Provide user PM control
+ */
+int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
+                        struct xc_get_cpufreq_para *user_para)
+{
+    DECLARE_SYSCTL;
+    int ret = 0;
+    struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
+    DECLARE_NAMED_HYPERCALL_BOUNCE(affected_cpus,
+                        user_para->affected_cpus,
+                        user_para->cpu_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_frequencies,
+                        user_para->scaling_available_frequencies,
+                        user_para->freq_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
+                        user_para->scaling_available_governors,
+                        user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    bool has_num = user_para->cpu_num &&
+                     user_para->freq_num &&
+                     user_para->gov_num;
+
+    if ( has_num )
+    {
+        if ( (!user_para->affected_cpus)                    ||
+             (!user_para->scaling_available_frequencies)    ||
+             (!user_para->scaling_available_governors) )
+        {
+            errno = EINVAL;
+            return -1;
+        }
+        if ( xc_hypercall_bounce_pre(xch, affected_cpus) )
+            goto unlock_1;
+        if ( xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
+            goto unlock_2;
+        if ( xc_hypercall_bounce_pre(xch, scaling_available_governors) )
+            goto unlock_3;
+
+        set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
+        set_xen_guest_handle(sys_para->scaling_available_frequencies, scaling_available_frequencies);
+        set_xen_guest_handle(sys_para->scaling_available_governors, scaling_available_governors);
+    }
+
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = GET_CPUFREQ_PARA;
+    sysctl.u.pm_op.cpuid = cpuid;
+    sys_para->cpu_num  = user_para->cpu_num;
+    sys_para->freq_num = user_para->freq_num;
+    sys_para->gov_num  = user_para->gov_num;
+
+    ret = xc_sysctl(xch, &sysctl);
+    if ( ret )
+    {
+        if ( errno == EAGAIN )
+        {
+            user_para->cpu_num  = sys_para->cpu_num;
+            user_para->freq_num = sys_para->freq_num;
+            user_para->gov_num  = sys_para->gov_num;
+            ret = -errno;
+        }
+
+        if ( has_num )
+            goto unlock_4;
+        goto unlock_1;
+    }
+    else
+    {
+        user_para->cpuinfo_cur_freq = sys_para->cpuinfo_cur_freq;
+        user_para->cpuinfo_max_freq = sys_para->cpuinfo_max_freq;
+        user_para->cpuinfo_min_freq = sys_para->cpuinfo_min_freq;
+        user_para->scaling_cur_freq = sys_para->scaling_cur_freq;
+        user_para->scaling_max_freq = sys_para->scaling_max_freq;
+        user_para->scaling_min_freq = sys_para->scaling_min_freq;
+        user_para->turbo_enabled    = sys_para->turbo_enabled;
+
+        memcpy(user_para->scaling_driver,
+                sys_para->scaling_driver, CPUFREQ_NAME_LEN);
+        memcpy(user_para->scaling_governor,
+                sys_para->scaling_governor, CPUFREQ_NAME_LEN);
+
+        /* Copy to user_para regardless of which cpufreq governor is in use. */
+        BUILD_BUG_ON(sizeof(((struct xc_get_cpufreq_para *)0)->u) !=
+                    sizeof(((struct xen_get_cpufreq_para *)0)->u));
+
+        memcpy(&user_para->u, &sys_para->u, sizeof(sys_para->u));
+    }
+
+unlock_4:
+    xc_hypercall_bounce_post(xch, scaling_available_governors);
+unlock_3:
+    xc_hypercall_bounce_post(xch, scaling_available_frequencies);
+unlock_2:
+    xc_hypercall_bounce_post(xch, affected_cpus);
+unlock_1:
+    return ret;
+}
+
+int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname)
+{
+    DECLARE_SYSCTL;
+    char *scaling_governor = sysctl.u.pm_op.u.set_gov.scaling_governor;
+
+    if ( !xch || !govname )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = SET_CPUFREQ_GOV;
+    sysctl.u.pm_op.cpuid = cpuid;
+    strncpy(scaling_governor, govname, CPUFREQ_NAME_LEN - 1);
+    scaling_governor[CPUFREQ_NAME_LEN - 1] = '\0';
+
+    return xc_sysctl(xch, &sysctl);
+}
+
+int xc_set_cpufreq_para(xc_interface *xch, int cpuid, 
+                        int ctrl_type, int ctrl_value)
+{
+    DECLARE_SYSCTL;
+
+    if ( !xch )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = SET_CPUFREQ_PARA;
+    sysctl.u.pm_op.cpuid = cpuid;
+    sysctl.u.pm_op.u.set_para.ctrl_type = ctrl_type;
+    sysctl.u.pm_op.u.set_para.ctrl_value = ctrl_value;
+
+    return xc_sysctl(xch, &sysctl);
+}
+
+int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq)
+{
+    int ret = 0;
+    DECLARE_SYSCTL;
+
+    if ( !xch || !avg_freq )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = GET_CPUFREQ_AVGFREQ;
+    sysctl.u.pm_op.cpuid = cpuid;
+    ret = xc_sysctl(xch, &sysctl);
+
+    *avg_freq = sysctl.u.pm_op.u.get_avgfreq;
+
+    return ret;
+}
+
+/*
+ * value: 0 - disable sched_smt_power_savings
+ *        1 - enable sched_smt_power_savings
+ */
+int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value)
+{
+    int rc;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_sched_opt_smt;
+    sysctl.u.pm_op.cpuid = 0;
+    sysctl.u.pm_op.u.set_sched_opt_smt = value;
+    rc = do_sysctl(xch, &sysctl);
+
+    return rc;
+}
+
+static int get_max_cstate(xc_interface *xch, uint32_t *value, uint32_t type)
+{
+    int rc;
+    DECLARE_SYSCTL;
+
+    if ( !xch || !value )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_get_max_cstate;
+    sysctl.u.pm_op.cpuid = type;
+    sysctl.u.pm_op.u.get_max_cstate = 0;
+    rc = do_sysctl(xch, &sysctl);
+    *value = sysctl.u.pm_op.u.get_max_cstate;
+
+    return rc;
+}
+
+int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value)
+{
+    return get_max_cstate(xch, value, 0);
+}
+
+int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value)
+{
+    return get_max_cstate(xch, value, 1);
+}
+
+static int set_max_cstate(xc_interface *xch, uint32_t value, uint32_t type)
+{
+    DECLARE_SYSCTL;
+
+    if ( !xch )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_max_cstate;
+    sysctl.u.pm_op.cpuid = type;
+    sysctl.u.pm_op.u.set_max_cstate = value;
+
+    return do_sysctl(xch, &sysctl);
+}
+
+int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value)
+{
+    return set_max_cstate(xch, value, 0);
+}
+
+int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value)
+{
+    return set_max_cstate(xch, value, 1);
+}
+
+int xc_enable_turbo(xc_interface *xch, int cpuid)
+{
+    DECLARE_SYSCTL;
+
+    if ( !xch )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_enable_turbo;
+    sysctl.u.pm_op.cpuid = cpuid;
+    return do_sysctl(xch, &sysctl);
+}
+
+int xc_disable_turbo(xc_interface *xch, int cpuid)
+{
+    DECLARE_SYSCTL;
+
+    if ( !xch )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_disable_turbo;
+    sysctl.u.pm_op.cpuid = cpuid;
+    return do_sysctl(xch, &sysctl);
+}
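
Getting Px data is a two-step protocol: xc_pm_get_max_px() tells the caller how large the buffers must be (max_px * max_px transition counters plus max_px xc_px_val entries, per the bounce sizes above), and xc_pm_get_pxstat() then fills them. A minimal sketch, with dump_px() being an illustrative name and error handling kept terse:

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    static int dump_px(xc_interface *xch, int cpu)
    {
        struct xc_px_stat st = { 0 };
        int max_px, rc = -1;

        if ( xc_pm_get_max_px(xch, cpu, &max_px) )
            return -1;

        /* The caller owns the buffers; their sizes follow from max_px. */
        st.trans_pt = calloc(max_px * max_px, sizeof(*st.trans_pt));
        st.pt = calloc(max_px, sizeof(*st.pt));
        if ( st.trans_pt && st.pt && !xc_pm_get_pxstat(xch, cpu, &st) )
        {
            printf("CPU%d: %d P-states, currently P%d\n", cpu, st.total, st.cur);
            rc = 0;
        }

        free(st.trans_pt);
        free(st.pt);
        return rc;
    }
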
diff --git a/tools/libs/ctrl/xc_private.c b/tools/libs/ctrl/xc_private.c
new file mode 100644 (file)
index 0000000..8af96b1
--- /dev/null
@@ -0,0 +1,781 @@
+/******************************************************************************
+ * xc_private.c
+ *
+ * Helper functions for the rest of the library.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+#include "xenctrl_dom.h"
+#include <stdarg.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <assert.h>
+
+struct xc_interface_core *xc_interface_open(xentoollog_logger *logger,
+                                            xentoollog_logger *dombuild_logger,
+                                            unsigned open_flags)
+{
+    struct xc_interface_core xch_buf = { 0 }, *xch = &xch_buf;
+
+    xch->flags = open_flags;
+    xch->dombuild_logger_file = 0;
+    xc_clear_last_error(xch);
+
+    xch->error_handler   = logger;           xch->error_handler_tofree   = 0;
+    xch->dombuild_logger = dombuild_logger;  xch->dombuild_logger_tofree = 0;
+
+    if (!xch->error_handler) {
+        xch->error_handler = xch->error_handler_tofree =
+            (xentoollog_logger*)
+            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
+        if (!xch->error_handler)
+            goto err;
+    }
+
+    xch = malloc(sizeof(*xch));
+    if (!xch) {
+        xch = &xch_buf;
+        PERROR("Could not allocate new xc_interface struct");
+        goto err;
+    }
+    *xch = xch_buf;
+
+    if (open_flags & XC_OPENFLAG_DUMMY)
+        return xch; /* We are done */
+
+    xch->xcall = xencall_open(xch->error_handler,
+        open_flags & XC_OPENFLAG_NON_REENTRANT ? XENCALL_OPENFLAG_NON_REENTRANT : 0U);
+    if ( xch->xcall == NULL )
+        goto err;
+
+    xch->fmem = xenforeignmemory_open(xch->error_handler, 0);
+    if ( xch->fmem == NULL )
+        goto err;
+
+    xch->dmod = xendevicemodel_open(xch->error_handler, 0);
+    if ( xch->dmod == NULL )
+        goto err;
+
+    return xch;
+
+ err:
+    xenforeignmemory_close(xch->fmem);
+    xencall_close(xch->xcall);
+    xtl_logger_destroy(xch->error_handler_tofree);
+    if (xch != &xch_buf) free(xch);
+    return NULL;
+}
+
+int xc_interface_close(xc_interface *xch)
+{
+    int rc = 0;
+
+    if (!xch)
+        return 0;
+
+    rc = xencall_close(xch->xcall);
+    if (rc) PERROR("Could not close xencall interface");
+
+    rc = xenforeignmemory_close(xch->fmem);
+    if (rc) PERROR("Could not close foreign memory interface");
+
+    rc = xendevicemodel_close(xch->dmod);
+    if (rc) PERROR("Could not close device model interface");
+
+    xtl_logger_destroy(xch->dombuild_logger_tofree);
+    xtl_logger_destroy(xch->error_handler_tofree);
+
+    free(xch);
+    return rc;
+}
+
+xencall_handle *xc_interface_xcall_handle(xc_interface *xch)
+{
+    return xch->xcall;
+}
+
+struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch)
+{
+    return xch->fmem;
+}
+
+struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch)
+{
+    return xch->dmod;
+}
+
+static pthread_key_t errbuf_pkey;
+static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;
+
+const xc_error *xc_get_last_error(xc_interface *xch)
+{
+    return &xch->last_error;
+}
+
+void xc_clear_last_error(xc_interface *xch)
+{
+    xch->last_error.code = XC_ERROR_NONE;
+    xch->last_error.message[0] = '\0';
+}
+
+const char *xc_error_code_to_desc(int code)
+{
+    /* Sync to members of xc_error_code enumeration in xenctrl.h */
+    switch ( code )
+    {
+    case XC_ERROR_NONE:
+        return "No error details";
+    case XC_INTERNAL_ERROR:
+        return "Internal error";
+    case XC_INVALID_KERNEL:
+        return "Invalid kernel";
+    case XC_INVALID_PARAM:
+        return "Invalid configuration";
+    case XC_OUT_OF_MEMORY:
+        return "Out of memory";
+    }
+
+    return "Unknown error code";
+}
+
+void xc_reportv(xc_interface *xch, xentoollog_logger *lg,
+                xentoollog_level level, int code,
+                const char *fmt, va_list args) {
+    int saved_errno = errno;
+    char msgbuf[XC_MAX_ERROR_MSG_LEN];
+    char *msg;
+
+    /* Strip newlines from messages.
+     * XXX really the messages themselves should have the newlines removed.
+     */
+    char fmt_nonewline[512];
+    int fmt_l;
+
+    fmt_l = strlen(fmt);
+    if (fmt_l && fmt[fmt_l-1]=='\n' && fmt_l < sizeof(fmt_nonewline)) {
+        memcpy(fmt_nonewline, fmt, fmt_l-1);
+        fmt_nonewline[fmt_l-1] = 0;
+        fmt = fmt_nonewline;
+    }
+
+    if ( level >= XTL_ERROR ) {
+        msg = xch->last_error.message;
+        xch->last_error.code = code;
+    } else {
+        msg = msgbuf;
+    }
+    vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
+    msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';
+
+    xtl_log(lg, level, -1, "xc",
+            "%s" "%s%s", msg,
+            code?": ":"", code ? xc_error_code_to_desc(code) : "");
+
+    errno = saved_errno;
+}
+
+void xc_report(xc_interface *xch, xentoollog_logger *lg,
+               xentoollog_level level, int code, const char *fmt, ...) {
+    va_list args;
+    va_start(args,fmt);
+    xc_reportv(xch,lg,level,code,fmt,args);
+    va_end(args);
+}
+
+void xc_report_error(xc_interface *xch, int code, const char *fmt, ...)
+{
+    va_list args;
+    va_start(args, fmt);
+    xc_reportv(xch, xch->error_handler, XTL_ERROR, code, fmt, args);
+    va_end(args);
+}
+
+const char *xc_set_progress_prefix(xc_interface *xch, const char *doing)
+{
+    const char *old = xch->currently_progress_reporting;
+
+    xch->currently_progress_reporting = doing;
+    return old;
+}
+
+void xc_report_progress_single(xc_interface *xch, const char *doing)
+{
+    assert(doing);
+    xtl_progress(xch->error_handler, "xc", doing, 0, 0);
+}
+
+void xc_report_progress_step(xc_interface *xch,
+                             unsigned long done, unsigned long total)
+{
+    assert(xch->currently_progress_reporting);
+    xtl_progress(xch->error_handler, "xc",
+                 xch->currently_progress_reporting, done, total);
+}
+
+int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
+                          unsigned int num, xen_pfn_t *arr)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(arr, sizeof(*arr) * num, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    if ( xc_hypercall_bounce_pre(xch, arr) )
+        return -1;
+    domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
+    domctl.domain = dom;
+    domctl.u.getpageframeinfo3.num = num;
+    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
+    rc = do_domctl_retry_efault(xch, &domctl);
+    xc_hypercall_bounce_post(xch, arr);
+    return rc;
+}
+
+int xc_mmuext_op(
+    xc_interface *xch,
+    struct mmuext_op *op,
+    unsigned int nr_ops,
+    uint32_t dom)
+{
+    DECLARE_HYPERCALL_BOUNCE(op, nr_ops*sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    long ret = -1;
+
+    if ( xc_hypercall_bounce_pre(xch, op) )
+    {
+        PERROR("Could not bounce memory for mmuext op hypercall");
+        goto out1;
+    }
+
+    ret = xencall4(xch->xcall, __HYPERVISOR_mmuext_op,
+                   HYPERCALL_BUFFER_AS_ARG(op),
+                   nr_ops, 0, dom);
+
+    xc_hypercall_bounce_post(xch, op);
+
+ out1:
+    return ret;
+}
+
+static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
+{
+    int rc, err = 0;
+    DECLARE_NAMED_HYPERCALL_BOUNCE(updates, mmu->updates, mmu->idx*sizeof(*mmu->updates), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( mmu->idx == 0 )
+        return 0;
+
+    if ( xc_hypercall_bounce_pre(xch, updates) )
+    {
+        PERROR("flush_mmu_updates: bounce buffer failed");
+        err = 1;
+        goto out;
+    }
+
+    rc = xencall4(xch->xcall, __HYPERVISOR_mmu_update,
+                  HYPERCALL_BUFFER_AS_ARG(updates),
+                  mmu->idx, 0, mmu->subject);
+    if ( rc < 0 )
+    {
+        ERROR("Failure when submitting mmu updates");
+        err = 1;
+    }
+
+    mmu->idx = 0;
+
+    xc_hypercall_bounce_post(xch, updates);
+
+ out:
+    return err;
+}
+
+struct xc_mmu *xc_alloc_mmu_updates(xc_interface *xch, unsigned int subject)
+{
+    struct xc_mmu *mmu = malloc(sizeof(*mmu));
+    if ( mmu == NULL )
+        return mmu;
+    mmu->idx     = 0;
+    mmu->subject = subject;
+    return mmu;
+}
+
+int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
+                      unsigned long long ptr, unsigned long long val)
+{
+    mmu->updates[mmu->idx].ptr = ptr;
+    mmu->updates[mmu->idx].val = val;
+
+    if ( ++mmu->idx == MAX_MMU_UPDATES )
+        return flush_mmu_updates(xch, mmu);
+
+    return 0;
+}
+
+int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
+{
+    return flush_mmu_updates(xch, mmu);
+}
+
+long do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
+{
+    DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    long ret = -1;
+
+    if ( xc_hypercall_bounce_pre(xch, arg) )
+    {
+        PERROR("Could not bounce memory for XENMEM hypercall");
+        goto out1;
+    }
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_memory_op,
+                   cmd, HYPERCALL_BUFFER_AS_ARG(arg));
+
+    xc_hypercall_bounce_post(xch, arg);
+ out1:
+    return ret;
+}
+
+int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn)
+{
+    long rc = do_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0);
+
+    if ( rc >= 0 )
+    {
+        *max_mfn = rc;
+        rc = 0;
+    }
+    return rc;
+}
+
+long long xc_domain_get_cpu_usage(xc_interface *xch, uint32_t domid, int vcpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
+    domctl.domain = domid;
+    domctl.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
+    if ( (do_domctl(xch, &domctl) < 0) )
+    {
+        PERROR("Could not get info on domain");
+        return -1;
+    }
+    return domctl.u.getvcpuinfo.cpu_time;
+}
+
+int xc_machphys_mfn_list(xc_interface *xch,
+                        unsigned long max_extents,
+                        xen_pfn_t *extent_start)
+{
+    int rc;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, max_extents * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    struct xen_machphys_mfn_list xmml = {
+        .max_extents = max_extents,
+    };
+
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
+    {
+        PERROR("Could not bounce memory for XENMEM_machphys_mfn_list hypercall");
+        return -1;
+    }
+
+    set_xen_guest_handle(xmml.extent_start, extent_start);
+    rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
+    if (rc || xmml.nr_extents != max_extents)
+        rc = -1;
+    else
+        rc = 0;
+
+    xc_hypercall_bounce_post(xch, extent_start);
+
+    return rc;
+}
+
+long xc_get_tot_pages(xc_interface *xch, uint32_t domid)
+{
+    xc_dominfo_t info;
+    if ( (xc_domain_getinfo(xch, domid, 1, &info) != 1) ||
+         (info.domid != domid) )
+        return -1;
+    return info.nr_pages;
+}
+
+int xc_copy_to_domain_page(xc_interface *xch,
+                           uint32_t domid,
+                           unsigned long dst_pfn,
+                           const char *src_page)
+{
+    void *vaddr = xc_map_foreign_range(
+        xch, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
+    if ( vaddr == NULL )
+        return -1;
+    memcpy(vaddr, src_page, PAGE_SIZE);
+    munmap(vaddr, PAGE_SIZE);
+    xc_domain_cacheflush(xch, domid, dst_pfn, 1);
+    return 0;
+}
+
+int xc_clear_domain_pages(xc_interface *xch,
+                          uint32_t domid,
+                          unsigned long dst_pfn,
+                          int num)
+{
+    size_t size = num * PAGE_SIZE;
+    void *vaddr = xc_map_foreign_range(
+        xch, domid, size, PROT_WRITE, dst_pfn);
+    if ( vaddr == NULL )
+        return -1;
+    memset(vaddr, 0, size);
+    munmap(vaddr, size);
+    xc_domain_cacheflush(xch, domid, dst_pfn, num);
+    return 0;
+}
+
+int xc_domctl(xc_interface *xch, struct xen_domctl *domctl)
+{
+    return do_domctl(xch, domctl);
+}
+
+int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl)
+{
+    return do_sysctl(xch, sysctl);
+}
+
+int xc_version(xc_interface *xch, int cmd, void *arg)
+{
+    DECLARE_HYPERCALL_BOUNCE(arg, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT); /* Size unknown until cmd decoded */
+    size_t sz;
+    int rc;
+
+    switch ( cmd )
+    {
+    case XENVER_version:
+        sz = 0;
+        break;
+    case XENVER_extraversion:
+        sz = sizeof(xen_extraversion_t);
+        break;
+    case XENVER_compile_info:
+        sz = sizeof(xen_compile_info_t);
+        break;
+    case XENVER_capabilities:
+        sz = sizeof(xen_capabilities_info_t);
+        break;
+    case XENVER_changeset:
+        sz = sizeof(xen_changeset_info_t);
+        break;
+    case XENVER_platform_parameters:
+        sz = sizeof(xen_platform_parameters_t);
+        break;
+    case XENVER_get_features:
+        sz = sizeof(xen_feature_info_t);
+        break;
+    case XENVER_pagesize:
+        sz = 0;
+        break;
+    case XENVER_guest_handle:
+        sz = sizeof(xen_domain_handle_t);
+        break;
+    case XENVER_commandline:
+        sz = sizeof(xen_commandline_t);
+        break;
+    case XENVER_build_id:
+        {
+            xen_build_id_t *build_id = (xen_build_id_t *)arg;
+            sz = sizeof(*build_id) + build_id->len;
+            HYPERCALL_BOUNCE_SET_DIR(arg, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+            break;
+        }
+    default:
+        ERROR("xc_version: unknown command %d\n", cmd);
+        return -EINVAL;
+    }
+
+    HYPERCALL_BOUNCE_SET_SIZE(arg, sz);
+
+    if ( (sz != 0) && xc_hypercall_bounce_pre(xch, arg) )
+    {
+        PERROR("Could not bounce buffer for version hypercall");
+        return -ENOMEM;
+    }
+
+    rc = do_xen_version(xch, cmd, HYPERCALL_BUFFER(arg));
+
+    if ( sz != 0 )
+        xc_hypercall_bounce_post(xch, arg);
+
+    return rc;
+}
+
+unsigned long xc_make_page_below_4G(
+    xc_interface *xch, uint32_t domid, unsigned long mfn)
+{
+    xen_pfn_t old_mfn = mfn;
+    xen_pfn_t new_mfn;
+
+    if ( xc_domain_decrease_reservation_exact(
+        xch, domid, 1, 0, &old_mfn) != 0 )
+    {
+        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
+        return 0;
+    }
+
+    if ( xc_domain_increase_reservation_exact(
+        xch, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
+    {
+        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
+        return 0;
+    }
+
+    return new_mfn;
+}
+
+static void
+_xc_clean_errbuf(void * m)
+{
+    free(m);
+    pthread_setspecific(errbuf_pkey, NULL);
+}
+
+static void
+_xc_init_errbuf(void)
+{
+    pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
+}
+
+const char *xc_strerror(xc_interface *xch, int errcode)
+{
+    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
+    {
+        return strerror(errcode);
+    }
+    else
+    {
+#define XS_BUFSIZE 32
+        char *errbuf;
+        static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+        char *strerror_str;
+
+        pthread_once(&errbuf_pkey_once, _xc_init_errbuf);
+
+        errbuf = pthread_getspecific(errbuf_pkey);
+        if (errbuf == NULL) {
+            errbuf = malloc(XS_BUFSIZE);
+            if ( errbuf == NULL )
+                return "(failed to allocate errbuf)";
+            pthread_setspecific(errbuf_pkey, errbuf);
+        }
+
+        /*
+         * Thread-unsafe strerror() is protected by a local mutex. We copy the
+         * string to a thread-private buffer before releasing the mutex.
+         */
+        pthread_mutex_lock(&mutex);
+        strerror_str = strerror(errcode);
+        strncpy(errbuf, strerror_str, XS_BUFSIZE);
+        errbuf[XS_BUFSIZE-1] = '\0';
+        pthread_mutex_unlock(&mutex);
+
+        return errbuf;
+    }
+}
+
+void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
+{
+    uint64_t l;
+    int i, j, b;
+
+    for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
+        l = lp[i];
+        for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
+            bp[b+j] = l;
+            l >>= 8;
+            nbits -= 8;
+        }
+    }
+}
+
+void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
+{
+    uint64_t l;
+    int i, j, b;
+
+    for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
+        l = 0;
+        for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
+            l |= (uint64_t)bp[b+j] << (j*8);
+            nbits -= 8;
+        }
+        lp[i] = l;
+    }
+}
+
+int read_exact(int fd, void *data, size_t size)
+{
+    size_t offset = 0;
+    ssize_t len;
+
+    while ( offset < size )
+    {
+        len = read(fd, (char *)data + offset, size - offset);
+        if ( (len == -1) && (errno == EINTR) )
+            continue;
+        if ( len == 0 )
+            errno = 0;
+        if ( len <= 0 )
+            return -1;
+        offset += len;
+    }
+
+    return 0;
+}
+
+int write_exact(int fd, const void *data, size_t size)
+{
+    size_t offset = 0;
+    ssize_t len;
+
+    while ( offset < size )
+    {
+        len = write(fd, (const char *)data + offset, size - offset);
+        if ( (len == -1) && (errno == EINTR) )
+            continue;
+        if ( len <= 0 )
+            return -1;
+        offset += len;
+    }
+
+    return 0;
+}
+
+#if defined(__MINIOS__)
+/*
+ * MiniOS's libc doesn't know about writev(). Implement it as multiple write()s.
+ */
+int writev_exact(int fd, const struct iovec *iov, int iovcnt)
+{
+    int rc, i;
+
+    for ( i = 0; i < iovcnt; ++i )
+    {
+        rc = write_exact(fd, iov[i].iov_base, iov[i].iov_len);
+        if ( rc )
+            return rc;
+    }
+
+    return 0;
+}
+#else
+int writev_exact(int fd, const struct iovec *iov, int iovcnt)
+{
+    struct iovec *local_iov = NULL;
+    int rc = 0, iov_idx = 0, saved_errno = 0;
+    ssize_t len;
+
+    while ( iov_idx < iovcnt )
+    {
+        /*
+         * Skip over iov[] entries with 0 length.
+         *
+         * This is needed to cover the case where we took a partial write and
+         * all remaining vectors are of 0 length.  In such a case, the results
+         * from writev() are indistinguishable from EOF.
+         */
+        while ( iov[iov_idx].iov_len == 0 )
+            if ( ++iov_idx == iovcnt )
+                goto out;
+
+        len = writev(fd, &iov[iov_idx], min(iovcnt - iov_idx, IOV_MAX));
+        saved_errno = errno;
+
+        if ( (len == -1) && (errno == EINTR) )
+            continue;
+        if ( len <= 0 )
+        {
+            rc = -1;
+            goto out;
+        }
+
+        /* Check iov[] to see whether we had a partial or complete write. */
+        while ( (len > 0) && (iov_idx < iovcnt) )
+        {
+            if ( len >= iov[iov_idx].iov_len )
+                len -= iov[iov_idx++].iov_len;
+            else
+            {
+                /* Partial write of iov[iov_idx]. Copy iov so we can adjust
+                 * element iov_idx and resubmit the rest. */
+                if ( !local_iov )
+                {
+                    local_iov = malloc(iovcnt * sizeof(*iov));
+                    if ( !local_iov )
+                    {
+                        saved_errno = ENOMEM;
+                        goto out;
+                    }
+
+                    iov = memcpy(local_iov, iov, iovcnt * sizeof(*iov));
+                }
+
+                local_iov[iov_idx].iov_base += len;
+                local_iov[iov_idx].iov_len  -= len;
+                break;
+            }
+        }
+    }
+
+    saved_errno = 0;
+
+ out:
+    free(local_iov);
+    errno = saved_errno;
+    return rc;
+}
+#endif
+
+int xc_ffs8(uint8_t x)
+{
+    int i;
+    for ( i = 0; i < 8; i++ )
+        if ( x & (1u << i) )
+            return i+1;
+    return 0;
+}
+
+int xc_ffs16(uint16_t x)
+{
+    uint8_t h = x>>8, l = x;
+    return l ? xc_ffs8(l) : h ? xc_ffs8(h) + 8 : 0;
+}
+
+int xc_ffs32(uint32_t x)
+{
+    uint16_t h = x>>16, l = x;
+    return l ? xc_ffs16(l) : h ? xc_ffs16(h) + 16 : 0;
+}
+
+int xc_ffs64(uint64_t x)
+{
+    uint32_t h = x>>32, l = x;
+    return l ? xc_ffs32(l) : h ? xc_ffs32(h) + 32 : 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
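
xc_interface_open() and xc_interface_close() bracket every use of the library; with NULL loggers a default stderr logger is created, as the code above shows. A minimal self-contained caller (illustrative only, built against the public xenctrl.h) looks like:

    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        unsigned long max_mfn = 0;

        if ( !xch )
            return 1;

        if ( xc_maximum_ram_page(xch, &max_mfn) == 0 )
            printf("highest MFN on this host: %lu\n", max_mfn);

        return xc_interface_close(xch);
    }
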
diff --git a/tools/libs/ctrl/xc_private.h b/tools/libs/ctrl/xc_private.h
new file mode 100644 (file)
index 0000000..f0b5f83
--- /dev/null
@@ -0,0 +1,479 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef XC_PRIVATE_H
+#define XC_PRIVATE_H
+
+#include <unistd.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <sys/ioctl.h>
+
+#include "_paths.h"
+
+#define XC_WANT_COMPAT_MAP_FOREIGN_API
+#define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
+#include "xenctrl.h"
+
+#include <xencall.h>
+#include <xenforeignmemory.h>
+#include <xendevicemodel.h>
+
+#include <xen/sys/privcmd.h>
+
+#include <xen-tools/libs.h>
+
+#if defined(HAVE_VALGRIND_MEMCHECK_H) && !defined(NDEBUG) && !defined(__MINIOS__)
+/* Compile in Valgrind client requests? */
+#include <valgrind/memcheck.h>
+#else
+#define VALGRIND_MAKE_MEM_UNDEFINED(addr, len) /* addr, len */
+#endif
+
+#if defined(__MINIOS__)
+/*
+ * MiniOS's libc doesn't know about sys/uio.h or writev().
+ * Declare enough of sys/uio.h to compile.
+ */
+struct iovec {
+    void *iov_base;
+    size_t iov_len;
+};
+#else
+#include <sys/uio.h>
+#endif
+
+#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
+
+#define GET_FIELD(_p, _f, _w) (((_w) == 8) ? ((_p)->x64._f) : ((_p)->x32._f))
+
+#define SET_FIELD(_p, _f, _v, _w) do {          \
+    if ((_w) == 8)                              \
+        (_p)->x64._f = (_v);                    \
+    else                                        \
+        (_p)->x32._f = (_v);                    \
+} while (0)
+
+/* XXX SMH: following skanky macros rely on variable p2m_size being set */
+/* XXX TJD: also, "guest_width" should be the guest's sizeof(unsigned long) */
+
+struct domain_info_context {
+    unsigned int guest_width;
+    unsigned long p2m_size;
+};
+
+/* Number of xen_pfn_t in a page */
+#define FPP             (PAGE_SIZE/(dinfo->guest_width))
+
+/* Number of entries in the pfn_to_mfn_frame_list_list */
+#define P2M_FLL_ENTRIES (((dinfo->p2m_size)+(FPP*FPP)-1)/(FPP*FPP))
+
+/* Number of entries in the pfn_to_mfn_frame_list */
+#define P2M_FL_ENTRIES  (((dinfo->p2m_size)+FPP-1)/FPP)
+
+/* Size in bytes of the pfn_to_mfn_frame_list     */
+#define P2M_GUEST_FL_SIZE ((P2M_FL_ENTRIES) * (dinfo->guest_width))
+#define P2M_TOOLS_FL_SIZE ((P2M_FL_ENTRIES) *                           \
+                           max_t(size_t, sizeof(xen_pfn_t), dinfo->guest_width))
+
+#define DECLARE_DOMCTL struct xen_domctl domctl
+#define DECLARE_SYSCTL struct xen_sysctl sysctl
+#define DECLARE_PHYSDEV_OP struct physdev_op physdev_op
+#define DECLARE_FLASK_OP struct xen_flask_op op
+#define DECLARE_PLATFORM_OP struct xen_platform_op platform_op
+
+#undef PAGE_SHIFT
+#undef PAGE_SIZE
+#undef PAGE_MASK
+#define PAGE_SHIFT              XC_PAGE_SHIFT
+#define PAGE_SIZE               XC_PAGE_SIZE
+#define PAGE_MASK               XC_PAGE_MASK
+
+#define INVALID_PFN ((xen_pfn_t)-1)
+
+/*
+ * Maximum dirty page cache to permit during save/restore -- a balance
+ * between keeping cache usage down and the CPU cost of invalidating it
+ * too often.  (Currently 4096 pages, i.e. 16MB.)
+ */
+#define MAX_PAGECACHE_USAGE (4*1024)
+
+struct xc_interface_core {
+    int flags;
+    xentoollog_logger *error_handler,   *error_handler_tofree;
+    xentoollog_logger *dombuild_logger, *dombuild_logger_tofree;
+    struct xc_error last_error; /* for xc_get_last_error */
+    FILE *dombuild_logger_file;
+    const char *currently_progress_reporting;
+
+    /* Hypercall interface */
+    xencall_handle *xcall;
+
+    /* Foreign mappings */
+    xenforeignmemory_handle *fmem;
+
+    /* Device model */
+    xendevicemodel_handle *dmod;
+};
+
+void *osdep_alloc_hypercall_buffer(xc_interface *xch, int npages);
+void osdep_free_hypercall_buffer(xc_interface *xch, void *ptr, int npages);
+
+void xc_report_error(xc_interface *xch, int code, const char *fmt, ...)
+    __attribute__((format(printf,3,4)));
+void xc_reportv(xc_interface *xch, xentoollog_logger *lg, xentoollog_level,
+                int code, const char *fmt, va_list args)
+     __attribute__((format(printf,5,0)));
+void xc_report(xc_interface *xch, xentoollog_logger *lg, xentoollog_level,
+               int code, const char *fmt, ...)
+     __attribute__((format(printf,5,6)));
+
+const char *xc_set_progress_prefix(xc_interface *xch, const char *doing);
+void xc_report_progress_single(xc_interface *xch, const char *doing);
+void xc_report_progress_step(xc_interface *xch,
+                             unsigned long done, unsigned long total);
+
+/* anaphoric macros:  struct xc_interface *xch  must be in scope */
+
+#define IPRINTF(_f, _a...)  do { int IPRINTF_errno = errno; \
+        xc_report(xch, xch->error_handler, XTL_INFO,0, _f , ## _a); \
+        errno = IPRINTF_errno; \
+        } while (0)
+#define DPRINTF(_f, _a...) do { int DPRINTF_errno = errno; \
+        xc_report(xch, xch->error_handler, XTL_DETAIL,0, _f , ## _a); \
+        errno = DPRINTF_errno; \
+        } while (0)
+#define DBGPRINTF(_f, _a...)  do { int DBGPRINTF_errno = errno; \
+        xc_report(xch, xch->error_handler, XTL_DEBUG,0, _f , ## _a); \
+        errno = DBGPRINTF_errno; \
+        } while (0)
+
+#define ERROR(_m, _a...)  do { int ERROR_errno = errno; \
+        xc_report_error(xch,XC_INTERNAL_ERROR,_m , ## _a ); \
+        errno = ERROR_errno; \
+        } while (0)
+#define PERROR(_m, _a...) do { int PERROR_errno = errno; \
+        xc_report_error(xch,XC_INTERNAL_ERROR,_m " (%d = %s)", \
+        ## _a , errno, xc_strerror(xch, errno)); \
+        errno = PERROR_errno; \
+        } while (0)
+
+/*
+ * HYPERCALL ARGUMENT BUFFERS
+ *
+ * Augment the public hypercall buffer interface with the ability to
+ * bounce between user provided buffers and hypercall safe memory.
+ *
+ * Use xc_hypercall_bounce_pre/post instead of
+ * xc_hypercall_buffer_alloc/free(_pages).  The specified user
+ * supplied buffer is automatically copied in/out of the hypercall
+ * safe memory.
+ */
+enum {
+    XC_HYPERCALL_BUFFER_BOUNCE_NONE = 0,
+    XC_HYPERCALL_BUFFER_BOUNCE_IN   = 1,
+    XC_HYPERCALL_BUFFER_BOUNCE_OUT  = 2,
+    XC_HYPERCALL_BUFFER_BOUNCE_BOTH = 3
+};
+
+/*
+ * Declare a named bounce buffer.
+ *
+ * Normally you should use DECLARE_HYPERCALL_BOUNCE (see below).
+ *
+ * This declaration should only be used when the user pointer is
+ * non-trivial, e.g. when it is contained within an existing data
+ * structure.
+ */
+#define DECLARE_NAMED_HYPERCALL_BOUNCE(_name, _ubuf, _sz, _dir) \
+    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = {  \
+        .hbuf = NULL,                                           \
+        .param_shadow = NULL,                                   \
+        .sz = _sz, .dir = _dir, .ubuf = _ubuf,                  \
+    }
+
+/*
+ * Declare a bounce buffer shadowing the named user data pointer.
+ */
+#define DECLARE_HYPERCALL_BOUNCE(_ubuf, _sz, _dir) DECLARE_NAMED_HYPERCALL_BOUNCE(_ubuf, _ubuf, _sz, _dir)
+
+/*
+ * Declare a bounce buffer shadowing the named user data pointer that
+ * cannot be modified.
+ */
+#define DECLARE_HYPERCALL_BOUNCE_IN(_ubuf, _sz)                     \
+    DECLARE_NAMED_HYPERCALL_BOUNCE(_ubuf, (void *)(_ubuf), _sz,     \
+                                   XC_HYPERCALL_BUFFER_BOUNCE_IN)
+
+/*
+ * Set the size of data to bounce. Useful when the size is not known
+ * when the bounce buffer is declared.
+ */
+#define HYPERCALL_BOUNCE_SET_SIZE(_buf, _sz) do { (HYPERCALL_BUFFER(_buf))->sz = _sz; } while (0)
+
+/*
+ * Change the direction.
+ *
+ * Can only be used if the bounce_pre/bounce_post commands have
+ * not been used.
+ */
+#define HYPERCALL_BOUNCE_SET_DIR(_buf, _dir) do { if ((HYPERCALL_BUFFER(_buf))->hbuf)         \
+                                                        assert(1);                            \
+                                                   (HYPERCALL_BUFFER(_buf))->dir = _dir;      \
+                                                } while (0)
+
+/*
+ * Initialise and free hypercall safe memory. Takes care of any required
+ * copying.
+ */
+int xc__hypercall_bounce_pre(xc_interface *xch, xc_hypercall_buffer_t *bounce);
+#define xc_hypercall_bounce_pre(_xch, _name) xc__hypercall_bounce_pre(_xch, HYPERCALL_BUFFER(_name))
+void xc__hypercall_bounce_post(xc_interface *xch, xc_hypercall_buffer_t *bounce);
+#define xc_hypercall_bounce_post(_xch, _name) xc__hypercall_bounce_post(_xch, HYPERCALL_BUFFER(_name))
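+
+/*
+ * Illustrative example (not part of the interface; get_something() is a
+ * made-up name): a typical libxc-internal caller bounces a user-supplied
+ * buffer around a single hypercall, mirroring do_memory_op() in
+ * xc_private.c:
+ *
+ *     long get_something(xc_interface *xch, int cmd, void *arg, size_t len)
+ *     {
+ *         DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+ *         long ret;
+ *
+ *         if ( xc_hypercall_bounce_pre(xch, arg) )
+ *             return -1;
+ *         ret = xencall2(xch->xcall, __HYPERVISOR_memory_op,
+ *                        cmd, HYPERCALL_BUFFER_AS_ARG(arg));
+ *         xc_hypercall_bounce_post(xch, arg);
+ *         return ret;
+ *     }
+ */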
+
+/*
+ * Release hypercall buffer cache
+ */
+void xc__hypercall_buffer_cache_release(xc_interface *xch);
+
+/*
+ * Hypercall interfaces.
+ */
+
+static inline int do_xen_version(xc_interface *xch, int cmd, xc_hypercall_buffer_t *dest)
+{
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(dest);
+    return xencall2(xch->xcall, __HYPERVISOR_xen_version,
+                    cmd, HYPERCALL_BUFFER_AS_ARG(dest));
+}
+
+static inline int do_physdev_op(xc_interface *xch, int cmd, void *op, size_t len)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BOUNCE(op, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, op) )
+    {
+        PERROR("Could not bounce memory for physdev hypercall");
+        goto out1;
+    }
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_physdev_op,
+                   cmd, HYPERCALL_BUFFER_AS_ARG(op));
+    if ( ret < 0 )
+    {
+        if ( errno == EACCES )
+            DPRINTF("physdev operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+    }
+
+    xc_hypercall_bounce_post(xch, op);
+out1:
+    return ret;
+}
+
+static inline int do_domctl_maybe_retry_efault(xc_interface *xch,
+                                               struct xen_domctl *domctl,
+                                               unsigned int retries)
+{
+    int ret = -1;
+    unsigned int retry_cnt = 0;
+
+    DECLARE_HYPERCALL_BOUNCE(domctl, sizeof(*domctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
+
+    if ( xc_hypercall_bounce_pre(xch, domctl) )
+    {
+        PERROR("Could not bounce buffer for domctl hypercall");
+        goto out1;
+    }
+
+    do {
+        ret = xencall1(xch->xcall, __HYPERVISOR_domctl,
+                       HYPERCALL_BUFFER_AS_ARG(domctl));
+    } while ( ret < 0 && errno == EFAULT && retry_cnt++ < retries );
+
+    if ( ret < 0 )
+    {
+        if ( errno == EACCES )
+            DPRINTF("domctl operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+    }
+
+    xc_hypercall_bounce_post(xch, domctl);
+ out1:
+    return ret;
+}
+
+static inline int do_domctl(xc_interface *xch, struct xen_domctl *domctl)
+{
+    return do_domctl_maybe_retry_efault(xch, domctl, 0);
+}
+
+static inline int do_domctl_retry_efault(xc_interface *xch, struct xen_domctl *domctl)
+{
+    unsigned int retries = xencall_buffers_never_fault(xch->xcall) ? 0 : 2;
+
+    return do_domctl_maybe_retry_efault(xch, domctl, retries);
+}
+
+static inline int do_sysctl(xc_interface *xch, struct xen_sysctl *sysctl)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BOUNCE(sysctl, sizeof(*sysctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    sysctl->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
+
+    if ( xc_hypercall_bounce_pre(xch, sysctl) )
+    {
+        PERROR("Could not bounce buffer for sysctl hypercall");
+        goto out1;
+    }
+
+    ret = xencall1(xch->xcall, __HYPERVISOR_sysctl,
+                   HYPERCALL_BUFFER_AS_ARG(sysctl));
+    if ( ret < 0 )
+    {
+        if ( errno == EACCES )
+            DPRINTF("sysctl operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+    }
+
+    xc_hypercall_bounce_post(xch, sysctl);
+ out1:
+    return ret;
+}
+
+static inline int do_platform_op(xc_interface *xch,
+                                 struct xen_platform_op *platform_op)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BOUNCE(platform_op, sizeof(*platform_op),
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    platform_op->interface_version = XENPF_INTERFACE_VERSION;
+
+    if ( xc_hypercall_bounce_pre(xch, platform_op) )
+    {
+        PERROR("Could not bounce buffer for platform_op hypercall");
+        return -1;
+    }
+
+    ret = xencall1(xch->xcall, __HYPERVISOR_platform_op,
+                   HYPERCALL_BUFFER_AS_ARG(platform_op));
+    if ( ret < 0 )
+    {
+        if ( errno == EACCES )
+            DPRINTF("platform operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+    }
+
+    xc_hypercall_bounce_post(xch, platform_op);
+    return ret;
+}
+
+static inline int do_multicall_op(xc_interface *xch,
+                                  xc_hypercall_buffer_t *call_list,
+                                  uint32_t nr_calls)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(call_list);
+
+    ret = xencall2(xch->xcall, __HYPERVISOR_multicall,
+                   HYPERCALL_BUFFER_AS_ARG(call_list), nr_calls);
+    if ( ret < 0 )
+    {
+        if ( errno == EACCES )
+            DPRINTF("multicall operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+    }
+
+    return ret;
+}
+
+long do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len);
+
+void *xc_map_foreign_ranges(xc_interface *xch, uint32_t dom,
+                            size_t size, int prot, size_t chunksize,
+                            privcmd_mmap_entry_t entries[], int nentries);
+
+int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
+                          unsigned int num, xen_pfn_t *);
+
+void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits);
+void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits);
+
+/* Optionally flush file to disk and discard page cache */
+void discard_file_cache(xc_interface *xch, int fd, int flush);
+
+#define MAX_MMU_UPDATES 1024
+struct xc_mmu {
+    mmu_update_t updates[MAX_MMU_UPDATES];
+    int          idx;
+    unsigned int subject;
+};
+/* Structure returned by xc_alloc_mmu_updates must be free()'ed by caller. */
+struct xc_mmu *xc_alloc_mmu_updates(xc_interface *xch, unsigned int subject);
+int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
+                   unsigned long long ptr, unsigned long long val);
+int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu);
+
+/* Return 0 on success; -1 on error setting errno. */
+int read_exact(int fd, void *data, size_t size); /* EOF => -1, errno=0 */
+int write_exact(int fd, const void *data, size_t size);
+int writev_exact(int fd, const struct iovec *iov, int iovcnt);
+
+int xc_ffs8(uint8_t x);
+int xc_ffs16(uint16_t x);
+int xc_ffs32(uint32_t x);
+int xc_ffs64(uint64_t x);
+
+#define DOMPRINTF(fmt, args...) xc_dom_printf(dom->xch, fmt, ## args)
+#define DOMPRINTF_CALLED(xch) xc_dom_printf((xch), "%s: called", __FUNCTION__)
+
+/**
+ * vm_event operations. Internal use only.
+ */
+int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
+                        unsigned int mode, uint32_t *port);
+/*
+ * Enables vm_event and returns the mapped ring page indicated by param.
+ * param can be HVM_PARAM_PAGING/MONITOR/SHARING_RING_PFN
REPLACED-BY-GR_REPLACE
+ */
+void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
+                         uint32_t *port);
+
+int do_dm_op(xc_interface *xch, uint32_t domid, unsigned int nr_bufs, ...);
+
+#endif /* __XC_PRIVATE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
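
A minimal usage sketch of the bounce-buffer macros declared above (illustrative only, not part of this commit): wrap a caller-supplied buffer whose size is only known at run time, much as xc_tbuf_set_cpu_mask() below does. The helper name is hypothetical; xch is an open xc_interface handle.

    /* Hypothetical helper; mirrors the DECLARE / SET_SIZE / pre / post pattern. */
    static int example_bounce_in(xc_interface *xch, xc_cpumap_t map)
    {
        int sz;
        DECLARE_HYPERCALL_BOUNCE(map, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);

        sz = xc_get_cpumap_size(xch);
        if ( sz <= 0 )
            return -1;
        HYPERCALL_BOUNCE_SET_SIZE(map, sz);       /* size not known at declaration */

        if ( xc_hypercall_bounce_pre(xch, map) )  /* allocate safe memory, copy in */
            return -1;

        /* ... issue a hypercall passing HYPERCALL_BUFFER_AS_ARG(map) ... */

        xc_hypercall_bounce_post(xch, map);       /* free (no copy out for _IN) */
        return 0;
    }
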
diff --git a/tools/libs/ctrl/xc_psr.c b/tools/libs/ctrl/xc_psr.c
new file mode 100644 (file)
index 0000000..1a0ab63
--- /dev/null
@@ -0,0 +1,395 @@
+/*
+ * xc_psr.c
+ *
+ * platform shared resource related API functions.
+ *
+ * Copyright (C) 2014      Intel Corporation
+ * Author Dongxiao Xu <dongxiao.xu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#include <assert.h>
+#include "xc_private.h"
+#include "xc_msr_x86.h"
+
+#define IA32_CMT_CTR_ERROR_MASK         (0x3ull << 62)
+
+#define EVTID_L3_OCCUPANCY             0x1
+#define EVTID_TOTAL_MEM_COUNT          0x2
+#define EVTID_LOCAL_MEM_COUNT          0x3
+
+int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_psr_cmt_op;
+    domctl.domain = domid;
+    domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_ATTACH;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_psr_cmt_op;
+    domctl.domain = domid;
+    domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_DETACH;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
+                               uint32_t *rmid)
+{
+    int rc;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_psr_cmt_op;
+    domctl.domain = domid;
+    domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_QUERY_RMID;
+
+    rc = do_domctl(xch, &domctl);
+
+    if ( !rc )
+        *rmid = domctl.u.psr_cmt_op.data;
+
+    return rc;
+}
+
+int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid)
+{
+    static int val = 0;
+    int rc;
+    DECLARE_SYSCTL;
+
+    if ( val )
+    {
+        *total_rmid = val;
+        return 0;
+    }
+
+    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
+    sysctl.u.psr_cmt_op.cmd = XEN_SYSCTL_PSR_CMT_get_total_rmid;
+    sysctl.u.psr_cmt_op.flags = 0;
+
+    rc = xc_sysctl(xch, &sysctl);
+    if ( !rc )
+        val = *total_rmid = sysctl.u.psr_cmt_op.u.data;
+
+    return rc;
+}
+
+int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
+                                       uint32_t *upscaling_factor)
+{
+    static int val = 0;
+    int rc;
+    DECLARE_SYSCTL;
+
+    if ( val )
+    {
+        *upscaling_factor = val;
+        return 0;
+    }
+
+    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
+    sysctl.u.psr_cmt_op.cmd =
+        XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor;
+    sysctl.u.psr_cmt_op.flags = 0;
+
+    rc = xc_sysctl(xch, &sysctl);
+    if ( !rc )
+        val = *upscaling_factor = sysctl.u.psr_cmt_op.u.data;
+
+    return rc;
+}
+
+int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask)
+{
+    int rc;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
+    sysctl.u.psr_cmt_op.cmd =
+        XEN_SYSCTL_PSR_CMT_get_l3_event_mask;
+    sysctl.u.psr_cmt_op.flags = 0;
+
+    rc = xc_sysctl(xch, &sysctl);
+    if ( !rc )
+        *event_mask = sysctl.u.psr_cmt_op.u.data;
+
+    return rc;
+}
+
+int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
+                                 uint32_t *l3_cache_size)
+{
+    static int val = 0;
+    int rc;
+    DECLARE_SYSCTL;
+
+    if ( val )
+    {
+        *l3_cache_size = val;
+        return 0;
+    }
+
+    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
+    sysctl.u.psr_cmt_op.cmd =
+        XEN_SYSCTL_PSR_CMT_get_l3_cache_size;
+    sysctl.u.psr_cmt_op.flags = 0;
+    sysctl.u.psr_cmt_op.u.l3_cache.cpu = cpu;
+
+    rc = xc_sysctl(xch, &sysctl);
+    if ( !rc )
+        val = *l3_cache_size = sysctl.u.psr_cmt_op.u.data;
+
+    return rc;
+}
+
+int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
+                        xc_psr_cmt_type type, uint64_t *monitor_data,
+                        uint64_t *tsc)
+{
+    xc_resource_op_t op;
+    xc_resource_entry_t entries[3];
+    xc_resource_entry_t *tsc_entry = NULL;
+    uint32_t evtid, nr = 0;
+    int rc;
+
+    switch ( type )
+    {
+    case XC_PSR_CMT_L3_OCCUPANCY:
+        evtid = EVTID_L3_OCCUPANCY;
+        break;
+    case XC_PSR_CMT_TOTAL_MEM_COUNT:
+        evtid = EVTID_TOTAL_MEM_COUNT;
+        break;
+    case XC_PSR_CMT_LOCAL_MEM_COUNT:
+        evtid = EVTID_LOCAL_MEM_COUNT;
+        break;
+    default:
+        return -1;
+    }
+
+    entries[nr].u.cmd = XEN_RESOURCE_OP_MSR_WRITE;
+    entries[nr].idx = MSR_IA32_CMT_EVTSEL;
+    entries[nr].val = (uint64_t)rmid << 32 | evtid;
+    entries[nr].rsvd = 0;
+    nr++;
+
+    entries[nr].u.cmd = XEN_RESOURCE_OP_MSR_READ;
+    entries[nr].idx = MSR_IA32_CMT_CTR;
+    entries[nr].val = 0;
+    entries[nr].rsvd = 0;
+    nr++;
+
+    if ( tsc != NULL )
+    {
+        tsc_entry = &entries[nr];
+        entries[nr].u.cmd = XEN_RESOURCE_OP_MSR_READ;
+        entries[nr].idx = MSR_IA32_TSC;
+        entries[nr].val = 0;
+        entries[nr].rsvd = 0;
+        nr++;
+    }
+
+    assert(nr <= ARRAY_SIZE(entries));
+
+    op.cpu = cpu;
+    op.nr_entries = nr;
+    op.entries = entries;
+
+    rc = xc_resource_op(xch, 1, &op);
+    if ( rc < 0 )
+        return rc;
+
+    if ( op.result != nr || entries[1].val & IA32_CMT_CTR_ERROR_MASK )
+        return -1;
+
+    *monitor_data = entries[1].val;
+
+    if ( tsc_entry != NULL )
+        *tsc = tsc_entry->val;
+
+    return 0;
+}
+
+int xc_psr_cmt_enabled(xc_interface *xch)
+{
+    static int val = -1;
+    int rc;
+    DECLARE_SYSCTL;
+
+    if ( val >= 0 )
+        return val;
+
+    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
+    sysctl.u.psr_cmt_op.cmd = XEN_SYSCTL_PSR_CMT_enabled;
+    sysctl.u.psr_cmt_op.flags = 0;
+
+    rc = do_sysctl(xch, &sysctl);
+    if ( !rc )
+    {
+        val = sysctl.u.psr_cmt_op.u.data;
+        return val;
+    }
+
+    return 0;
+}
+int xc_psr_set_domain_data(xc_interface *xch, uint32_t domid,
+                           xc_psr_type type, uint32_t target,
+                           uint64_t data)
+{
+    DECLARE_DOMCTL;
+    uint32_t cmd;
+
+    switch ( type )
+    {
+    case XC_PSR_CAT_L3_CBM:
+        cmd = XEN_DOMCTL_PSR_SET_L3_CBM;
+        break;
+    case XC_PSR_CAT_L3_CBM_CODE:
+        cmd = XEN_DOMCTL_PSR_SET_L3_CODE;
+        break;
+    case XC_PSR_CAT_L3_CBM_DATA:
+        cmd = XEN_DOMCTL_PSR_SET_L3_DATA;
+        break;
+    case XC_PSR_CAT_L2_CBM:
+        cmd = XEN_DOMCTL_PSR_SET_L2_CBM;
+        break;
+    case XC_PSR_MBA_THRTL:
+        cmd = XEN_DOMCTL_PSR_SET_MBA_THRTL;
+        break;
+    default:
+        errno = EINVAL;
+        return -1;
+    }
+
+    domctl.cmd = XEN_DOMCTL_psr_alloc;
+    domctl.domain = domid;
+    domctl.u.psr_alloc.cmd = cmd;
+    domctl.u.psr_alloc.target = target;
+    domctl.u.psr_alloc.data = data;
+
+    return do_domctl(xch, &domctl);
+}
+
+int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
+                           xc_psr_type type, uint32_t target,
+                           uint64_t *data)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    uint32_t cmd;
+
+    switch ( type )
+    {
+    case XC_PSR_CAT_L3_CBM:
+        cmd = XEN_DOMCTL_PSR_GET_L3_CBM;
+        break;
+    case XC_PSR_CAT_L3_CBM_CODE:
+        cmd = XEN_DOMCTL_PSR_GET_L3_CODE;
+        break;
+    case XC_PSR_CAT_L3_CBM_DATA:
+        cmd = XEN_DOMCTL_PSR_GET_L3_DATA;
+        break;
+    case XC_PSR_CAT_L2_CBM:
+        cmd = XEN_DOMCTL_PSR_GET_L2_CBM;
+        break;
+    case XC_PSR_MBA_THRTL:
+        cmd = XEN_DOMCTL_PSR_GET_MBA_THRTL;
+        break;
+    default:
+        errno = EINVAL;
+        return -1;
+    }
+
+    domctl.cmd = XEN_DOMCTL_psr_alloc;
+    domctl.domain = domid;
+    domctl.u.psr_alloc.cmd = cmd;
+    domctl.u.psr_alloc.target = target;
+
+    rc = do_domctl(xch, &domctl);
+
+    if ( !rc )
+        *data = domctl.u.psr_alloc.data;
+
+    return rc;
+}
+
+int xc_psr_get_hw_info(xc_interface *xch, uint32_t socket,
+                       xc_psr_feat_type type, xc_psr_hw_info *hw_info)
+{
+    int rc = -1;
+    DECLARE_SYSCTL;
+
+    if ( !hw_info )
+    {
+        errno = EINVAL;
+        return rc;
+    }
+
+    sysctl.cmd = XEN_SYSCTL_psr_alloc;
+    sysctl.u.psr_alloc.target = socket;
+
+    switch ( type )
+    {
+    case XC_PSR_CAT_L2:
+    case XC_PSR_CAT_L3:
+        sysctl.u.psr_alloc.cmd = (type == XC_PSR_CAT_L2) ?
+                                 XEN_SYSCTL_PSR_get_l2_info :
+                                 XEN_SYSCTL_PSR_get_l3_info;
+
+        rc = xc_sysctl(xch, &sysctl);
+        if ( rc )
+            break;
+
+        hw_info->cat.cos_max = sysctl.u.psr_alloc.u.cat_info.cos_max;
+        hw_info->cat.cbm_len = sysctl.u.psr_alloc.u.cat_info.cbm_len;
+        hw_info->cat.cdp_enabled = (type == XC_PSR_CAT_L2) ?
+                                   false :
+                                   (sysctl.u.psr_alloc.u.cat_info.flags &
+                                    XEN_SYSCTL_PSR_CAT_L3_CDP);
+
+        break;
+    case XC_PSR_MBA:
+        sysctl.u.psr_alloc.cmd = XEN_SYSCTL_PSR_get_mba_info;
+        rc = xc_sysctl(xch, &sysctl);
+        if ( rc )
+            break;
+
+        hw_info->mba.cos_max = sysctl.u.psr_alloc.u.mba_info.cos_max;
+        hw_info->mba.thrtl_max = sysctl.u.psr_alloc.u.mba_info.thrtl_max;
+        hw_info->mba.linear = sysctl.u.psr_alloc.u.mba_info.flags &
+                              XEN_SYSCTL_PSR_MBA_LINEAR;
+
+        break;
+    default:
+        errno = EOPNOTSUPP;
+        break;
+    }
+
+    return rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
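
A hedged usage sketch for the CMT calls above (not part of this commit; xch, domid and cpu are assumed to be in scope, and error handling is elided): attach the domain to obtain an RMID, read the raw L3 occupancy counter on one CPU, and scale it to bytes.

    uint32_t rmid, factor;
    uint64_t data;

    xc_psr_cmt_attach(xch, domid);
    xc_psr_cmt_get_domain_rmid(xch, domid, &rmid);
    xc_psr_cmt_get_l3_upscaling_factor(xch, &factor);
    /* The tsc pointer may be NULL when the timestamp is not needed. */
    xc_psr_cmt_get_data(xch, rmid, cpu, XC_PSR_CMT_L3_OCCUPANCY, &data, NULL);
    /* Occupancy in bytes = data * factor. */
    xc_psr_cmt_detach(xch, domid);
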
diff --git a/tools/libs/ctrl/xc_resource.c b/tools/libs/ctrl/xc_resource.c
new file mode 100644 (file)
index 0000000..3394cc1
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * xc_resource.c
+ *
+ * Generic resource access API
+ *
+ * Copyright (C) 2014      Intel Corporation
+ * Author Dongxiao Xu <dongxiao.xu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#include "xc_private.h"
+
+static int xc_resource_op_one(xc_interface *xch, xc_resource_op_t *op)
+{
+    int rc;
+    DECLARE_PLATFORM_OP;
+    DECLARE_NAMED_HYPERCALL_BOUNCE(entries, op->entries,
+                                op->nr_entries * sizeof(*op->entries),
+                                XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, entries) )
+        return -1;
+
+    platform_op.cmd = XENPF_resource_op;
+    platform_op.u.resource_op.nr_entries = op->nr_entries;
+    platform_op.u.resource_op.cpu = op->cpu;
+    set_xen_guest_handle(platform_op.u.resource_op.entries, entries);
+
+    rc = do_platform_op(xch, &platform_op);
+    op->result = rc;
+
+    xc_hypercall_bounce_post(xch, entries);
+
+    return rc;
+}
+
+static int xc_resource_op_multi(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops)
+{
+    int rc, i, entries_size;
+    xc_resource_op_t *op;
+    multicall_entry_t *call;
+    DECLARE_HYPERCALL_BUFFER(multicall_entry_t, call_list);
+    xc_hypercall_buffer_array_t *platform_ops, *entries_list = NULL;
+
+    call_list = xc_hypercall_buffer_alloc(xch, call_list,
+                                          sizeof(*call_list) * nr_ops);
+    if ( !call_list )
+        return -1;
+
+    platform_ops = xc_hypercall_buffer_array_create(xch, nr_ops);
+    if ( !platform_ops )
+    {
+        rc = -1;
+        goto out;
+    }
+
+    entries_list = xc_hypercall_buffer_array_create(xch, nr_ops);
+    if ( !entries_list )
+    {
+        rc = -1;
+        goto out;
+    }
+
+    for ( i = 0; i < nr_ops; i++ )
+    {
+        DECLARE_HYPERCALL_BUFFER(xen_platform_op_t, platform_op);
+        DECLARE_HYPERCALL_BUFFER(xc_resource_entry_t, entries);
+
+        op = ops + i;
+
+        platform_op = xc_hypercall_buffer_array_alloc(xch, platform_ops, i,
+                        platform_op, sizeof(xen_platform_op_t));
+        if ( !platform_op )
+        {
+            rc = -1;
+            goto out;
+        }
+
+        entries_size = sizeof(xc_resource_entry_t) * op->nr_entries;
+        entries = xc_hypercall_buffer_array_alloc(xch, entries_list, i,
+                   entries, entries_size);
+        if ( !entries )
+        {
+            rc = -1;
+            goto out;
+        }
+        memcpy(entries, op->entries, entries_size);
+
+        call = call_list + i;
+        call->op = __HYPERVISOR_platform_op;
+        call->args[0] = HYPERCALL_BUFFER_AS_ARG(platform_op);
+
+        platform_op->interface_version = XENPF_INTERFACE_VERSION;
+        platform_op->cmd = XENPF_resource_op;
+        platform_op->u.resource_op.cpu = op->cpu;
+        platform_op->u.resource_op.nr_entries = op->nr_entries;
+        set_xen_guest_handle(platform_op->u.resource_op.entries, entries);
+    }
+
+    rc = do_multicall_op(xch, HYPERCALL_BUFFER(call_list), nr_ops);
+
+    for ( i = 0; i < nr_ops; i++ )
+    {
+        DECLARE_HYPERCALL_BUFFER(xc_resource_entry_t, entries);
+        op = ops + i;
+
+        call = call_list + i;
+        op->result = call->result;
+
+        entries_size = sizeof(xc_resource_entry_t) * op->nr_entries;
+        entries = xc_hypercall_buffer_array_get(xch, entries_list, i,
+                   entries, entries_size);
+        memcpy(op->entries, entries, entries_size);
+    }
+
+out:
+    xc_hypercall_buffer_array_destroy(xch, entries_list);
+    xc_hypercall_buffer_array_destroy(xch, platform_ops);
+    xc_hypercall_buffer_free(xch, call_list);
+    return rc;
+}
+
+int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops)
+{
+    if ( nr_ops == 1 )
+        return xc_resource_op_one(xch, ops);
+
+    if ( nr_ops > 1 )
+        return xc_resource_op_multi(xch, nr_ops, ops);
+
+    return -1;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
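
An illustrative caller of xc_resource_op() (not part of this commit; xch and cpu are assumed, and the MSR index is a placeholder): a single-entry op reads one MSR on the chosen CPU, with op.result reporting how many entries were processed.

    xc_resource_entry_t entry = {
        .u.cmd = XEN_RESOURCE_OP_MSR_READ,
        .idx   = 0x10,    /* e.g. the TSC MSR, purely for illustration */
        .val   = 0,
        .rsvd  = 0,
    };
    xc_resource_op_t op = {
        .cpu        = cpu,
        .nr_entries = 1,
        .entries    = &entry,
    };

    if ( xc_resource_op(xch, 1, &op) >= 0 && op.result == 1 )
        /* entry.val now holds the value read on that CPU */ ;
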
diff --git a/tools/libs/ctrl/xc_resume.c b/tools/libs/ctrl/xc_resume.c
new file mode 100644 (file)
index 0000000..94c6c9f
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <xen/foreign/x86_32.h>
+#include <xen/foreign/x86_64.h>
+#include <xen/hvm/params.h>
+
+static int modify_returncode(xc_interface *xch, uint32_t domid)
+{
+    vcpu_guest_context_any_t ctxt;
+    xc_dominfo_t info;
+    xen_capabilities_info_t caps;
+    struct domain_info_context _dinfo = {};
+    struct domain_info_context *dinfo = &_dinfo;
+    int rc;
+
+    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
+         info.domid != domid )
+    {
+        PERROR("Could not get domain info");
+        return -1;
+    }
+
+    if ( !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
+    {
+        ERROR("Dom %d not suspended: (shutdown %d, reason %d)", domid,
+              info.shutdown, info.shutdown_reason);
+        errno = EINVAL;
+        return -1;
+    }
+
+    if ( info.hvm )
+    {
+        /* HVM guests without PV drivers have no return code to modify. */
+        uint64_t irq = 0;
+        xc_hvm_param_get(xch, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
+        if ( !irq )
+            return 0;
+
+        /* HVM guests have host address width. */
+        if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
+        {
+            PERROR("Could not get Xen capabilities");
+            return -1;
+        }
+        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
+    }
+    else
+    {
+        /* Probe PV guest address width. */
+        if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) )
+            return -1;
+    }
+
+    if ( (rc = xc_vcpu_getcontext(xch, domid, 0, &ctxt)) != 0 )
+        return rc;
+
+    SET_FIELD(&ctxt, user_regs.eax, 1, dinfo->guest_width);
+
+    if ( (rc = xc_vcpu_setcontext(xch, domid, 0, &ctxt)) != 0 )
+        return rc;
+
+    return 0;
+}
+
+#else
+
+static int modify_returncode(xc_interface *xch, uint32_t domid)
+{
+    return 0;
+
+}
+
+#endif
+
+static int xc_domain_resume_cooperative(xc_interface *xch, uint32_t domid)
+{
+    DECLARE_DOMCTL;
+    int rc;
+
+    /*
+     * Set hypercall return code to indicate that suspend is cancelled
+     * (rather than resuming in a new domain context).
+     */
+    if ( (rc = modify_returncode(xch, domid)) != 0 )
+        return rc;
+
+    domctl.cmd = XEN_DOMCTL_resumedomain;
+    domctl.domain = domid;
+    return do_domctl(xch, &domctl);
+}
+
+#if defined(__i386__) || defined(__x86_64__)
+static int xc_domain_resume_hvm(xc_interface *xch, uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    /*
+     * The domctl XEN_DOMCTL_resumedomain unpauses each vcpu. After
+     * the domctl, the guest will run.
+     *
+     * If it is a PVHVM guest, it called the hypercall
+     *    SCHEDOP_shutdown:SHUTDOWN_suspend
+     * to suspend itself. We don't modify the return code, so the PV drivers
+     * will disconnect and reconnect.
+     *
+     * If it is a plain HVM guest, it will simply continue running.
+     */
+    domctl.cmd = XEN_DOMCTL_resumedomain;
+    domctl.domain = domid;
+    return do_domctl(xch, &domctl);
+}
+#endif
+
+static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
+{
+    DECLARE_DOMCTL;
+    xc_dominfo_t info;
+    int i, rc = -1;
+#if defined(__i386__) || defined(__x86_64__)
+    struct domain_info_context _dinfo = { .guest_width = 0,
+                                          .p2m_size = 0 };
+    struct domain_info_context *dinfo = &_dinfo;
+    unsigned long mfn;
+    vcpu_guest_context_any_t ctxt;
+    start_info_t *start_info;
+    shared_info_t *shinfo = NULL;
+    xen_pfn_t *p2m_frame_list_list = NULL;
+    xen_pfn_t *p2m_frame_list = NULL;
+    xen_pfn_t *p2m = NULL;
+#endif
+
+    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
+    {
+        PERROR("Could not get domain info");
+        return rc;
+    }
+
+    /*
+     * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
+     */
+#if defined(__i386__) || defined(__x86_64__)
+    if ( info.hvm )
+        return xc_domain_resume_hvm(xch, domid);
+
+    if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
+    {
+        PERROR("Could not get domain width");
+        return rc;
+    }
+    if ( dinfo->guest_width != sizeof(long) )
+    {
+        ERROR("Cannot resume uncooperative cross-address-size guests");
+        return rc;
+    }
+
+    /* Map the shared info frame */
+    shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
+                                  PROT_READ, info.shared_info_frame);
+    if ( shinfo == NULL )
+    {
+        ERROR("Couldn't map shared info");
+        goto out;
+    }
+
+    dinfo->p2m_size = shinfo->arch.max_pfn;
+
+    p2m_frame_list_list =
+        xc_map_foreign_range(xch, domid, PAGE_SIZE, PROT_READ,
+                             shinfo->arch.pfn_to_mfn_frame_list_list);
+    if ( p2m_frame_list_list == NULL )
+    {
+        ERROR("Couldn't map p2m_frame_list_list");
+        goto out;
+    }
+
+    p2m_frame_list = xc_map_foreign_pages(xch, domid, PROT_READ,
+                                          p2m_frame_list_list,
+                                          P2M_FLL_ENTRIES);
+    if ( p2m_frame_list == NULL )
+    {
+        ERROR("Couldn't map p2m_frame_list");
+        goto out;
+    }
+
+    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
+       the guest must not change which frames are used for this purpose.
+       (It's not clear why it would want to change them, and we're OK
+       from a safety POV anyhow.) */
+    p2m = xc_map_foreign_pages(xch, domid, PROT_READ,
+                               p2m_frame_list,
+                               P2M_FL_ENTRIES);
+    if ( p2m == NULL )
+    {
+        ERROR("Couldn't map p2m table");
+        goto out;
+    }
+
+    if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
+    {
+        ERROR("Could not get vcpu context");
+        goto out;
+    }
+
+    mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);
+
+    start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
+                                      PROT_READ | PROT_WRITE, mfn);
+    if ( start_info == NULL )
+    {
+        ERROR("Couldn't map start_info");
+        goto out;
+    }
+
+    start_info->store_mfn        = p2m[start_info->store_mfn];
+    start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];
+
+    munmap(start_info, PAGE_SIZE);
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+    /* Reset all secondary CPU states. */
+    for ( i = 1; i <= info.max_vcpu_id; i++ )
+        if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
+        {
+            ERROR("Couldn't reset vcpu state");
+            goto out;
+        }
+
+    /* Ready to resume domain execution now. */
+    domctl.cmd = XEN_DOMCTL_resumedomain;
+    domctl.domain = domid;
+    rc = do_domctl(xch, &domctl);
+
+out:
+#if defined(__i386__) || defined(__x86_64__)
+    if (p2m)
+        munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
+    if (p2m_frame_list)
+        munmap(p2m_frame_list, P2M_FLL_ENTRIES*PAGE_SIZE);
+    if (p2m_frame_list_list)
+        munmap(p2m_frame_list_list, PAGE_SIZE);
+    if (shinfo)
+        munmap(shinfo, PAGE_SIZE);
+#endif
+
+    return rc;
+}
+
+/*
+ * Resume execution of a domain after suspend shutdown.
+ * This can happen in one of two ways:
+ *  1. (fast=1) Resume the guest without resetting the domain environment.
+ *     The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend) will return 1.
+ *
+ *  2. (fast=0) Reset guest environment so it believes it is resumed in a new
+ *     domain context. The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend)
+ *     will return 0.
+ *
+ * (1) should only be used for guests which can handle the special return
+ * code. Note also that inserting the return code is non-trivial and the
+ * guest MUST be paused - otherwise we would be corrupting the guest vCPU
+ * state.
+ *
+ * (2) should be used only for guests which cannot handle the special
+ * new return code - and it is always safe (but slower).
+ */
+int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
+{
+    return (fast
+            ? xc_domain_resume_cooperative(xch, domid)
+            : xc_domain_resume_any(xch, domid));
+}
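
A short sketch of how a toolstack caller might use this (not part of this commit; xch and domid assumed): a cooperative resume after a cancelled suspend uses fast=1, so the guest's SCHEDOP_shutdown(SHUTDOWN_suspend) hypercall returns 1 and the guest keeps its existing environment.

    /* The guest must still be suspended (and effectively paused) here. */
    if ( xc_domain_resume(xch, domid, 1 /* fast */) )
        perror("xc_domain_resume");
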
diff --git a/tools/libs/ctrl/xc_rt.c b/tools/libs/ctrl/xc_rt.c
new file mode 100644 (file)
index 0000000..ad257c6
--- /dev/null
@@ -0,0 +1,132 @@
+/****************************************************************************
+ *
+ *        File: xc_rt.c
+ *      Author: Sisu Xi
+ *              Meng Xu
+ *
+ * Description: XC Interface to the rtds scheduler
+ * Note: VCPU parameters (period, budget) are in microseconds (us).
+ *       All VCPUs of the same domain have the same period and budget.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+int xc_sched_rtds_domain_set(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_sched_rtds *sdom)
+{
+    int rc;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
+    domctl.u.scheduler_op.u.rtds.period = sdom->period;
+    domctl.u.scheduler_op.u.rtds.budget = sdom->budget;
+
+    rc = do_domctl(xch, &domctl);
+
+    return rc;
+}
+
+int xc_sched_rtds_domain_get(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_sched_rtds *sdom)
+{
+    int rc;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
+
+    rc = do_domctl(xch, &domctl);
+
+    if ( rc == 0 )
+        *sdom = domctl.u.scheduler_op.u.rtds;
+
+    return rc;
+}
+
+int xc_sched_rtds_vcpu_set(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus)
+{
+    int rc = 0;
+    unsigned processed = 0;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * num_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, vcpus) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putvcpuinfo;
+
+    while ( processed < num_vcpus )
+    {
+        domctl.u.scheduler_op.u.v.nr_vcpus = num_vcpus - processed;
+        set_xen_guest_handle_offset(domctl.u.scheduler_op.u.v.vcpus, vcpus,
+                                    processed);
+        if ( (rc = do_domctl(xch, &domctl)) != 0 )
+            break;
+        processed += domctl.u.scheduler_op.u.v.nr_vcpus;
+    }
+
+    xc_hypercall_bounce_post(xch, vcpus);
+
+    return rc;
+}
+
+int xc_sched_rtds_vcpu_get(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus)
+{
+    int rc = 0;
+    unsigned processed = 0;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * num_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, vcpus) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
+
+    while ( processed < num_vcpus )
+    {
+        domctl.u.scheduler_op.u.v.nr_vcpus = num_vcpus - processed;
+        set_xen_guest_handle_offset(domctl.u.scheduler_op.u.v.vcpus, vcpus,
+                                    processed);
+        if ( (rc = do_domctl(xch, &domctl)) != 0 )
+            break;
+        processed += domctl.u.scheduler_op.u.v.nr_vcpus;
+    }
+
+    xc_hypercall_bounce_post(xch, vcpus);
+
+    return rc;
+}
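
A brief usage sketch (not part of this commit; xch and domid assumed): give every vcpu of a domain a 10ms period and 4ms budget, using the domain-wide setter and the microsecond units noted in the file header.

    struct xen_domctl_sched_rtds rtds = {
        .period = 10000,   /* us */
        .budget = 4000,    /* us */
    };

    if ( xc_sched_rtds_domain_set(xch, domid, &rtds) )
        perror("xc_sched_rtds_domain_set");
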
diff --git a/tools/libs/ctrl/xc_solaris.c b/tools/libs/ctrl/xc_solaris.c
new file mode 100644 (file)
index 0000000..5128f3f
--- /dev/null
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+#include <malloc.h>
+
+/* Optionally flush file to disk and discard page cache */
+void discard_file_cache(xc_interface *xch, int fd, int flush) 
+{
+    // TODO: Implement for Solaris!
+}
+
+void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
+{
+    return memalign(alignment, size);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/tools/libs/ctrl/xc_tbuf.c b/tools/libs/ctrl/xc_tbuf.c
new file mode 100644 (file)
index 0000000..283fbd1
--- /dev/null
@@ -0,0 +1,172 @@
+/******************************************************************************
+ * xc_tbuf.c
+ *
+ * API for manipulating and accessing trace buffer parameters
+ *
+ * Copyright (c) 2005, Rob Gardner
+ *
+ * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+#include <xen/trace.h>
+
+static int tbuf_enable(xc_interface *xch, int enable)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_tbuf_op;
+    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
+    if ( enable )
+        sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_enable;
+    else
+        sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_disable;
+
+    return xc_sysctl(xch, &sysctl);
+}
+
+int xc_tbuf_set_size(xc_interface *xch, unsigned long size)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_tbuf_op;
+    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
+    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_size;
+    sysctl.u.tbuf_op.size = size;
+
+    return xc_sysctl(xch, &sysctl);
+}
+
+int xc_tbuf_get_size(xc_interface *xch, unsigned long *size)
+{
+    struct t_info *t_info;
+    int rc;
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_tbuf_op;
+    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
+    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_get_info;
+
+    rc = xc_sysctl(xch, &sysctl);
+    if ( rc != 0 )
+        return rc;
+
+    t_info = xc_map_foreign_range(xch, DOMID_XEN,
+                    sysctl.u.tbuf_op.size, PROT_READ | PROT_WRITE,
+                    sysctl.u.tbuf_op.buffer_mfn);
+
+    if ( t_info == NULL || t_info->tbuf_size == 0 )
+        rc = -1;
+    else
+       *size = t_info->tbuf_size;
+
+    xenforeignmemory_unmap(xch->fmem, t_info, sysctl.u.tbuf_op.size);
+
+    return rc;
+}
+
+int xc_tbuf_enable(xc_interface *xch, unsigned long pages, unsigned long *mfn,
+                   unsigned long *size)
+{
+    DECLARE_SYSCTL;
+    int rc;
+
+    /*
+     * Ignore errors (at least for now) as we get an error if size is already
+     * set (since trace buffers cannot be reallocated). If we really have no
+     * buffers at all then tbuf_enable() will fail, so this is safe.
+     */
+    (void)xc_tbuf_set_size(xch, pages);
+
+    if ( tbuf_enable(xch, 1) != 0 )
+        return -1;
+
+    sysctl.cmd = XEN_SYSCTL_tbuf_op;
+    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
+    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_get_info;
+
+    rc = xc_sysctl(xch, &sysctl);
+    if ( rc == 0 )
+    {
+        *size = sysctl.u.tbuf_op.size;
+        *mfn = sysctl.u.tbuf_op.buffer_mfn;
+    }
+
+    return 0;
+}
+
+int xc_tbuf_disable(xc_interface *xch)
+{
+    return tbuf_enable(xch, 0);
+}
+
+int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask)
+{
+    DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(mask, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    int ret = -1;
+    int bits, cpusize;
+
+    cpusize = xc_get_cpumap_size(xch);
+    if (cpusize <= 0)
+    {
+        PERROR("Could not get number of cpus");
+        return -1;
+    }
+
+    HYPERCALL_BOUNCE_SET_SIZE(mask, cpusize);
+
+    bits = xc_get_max_cpus(xch);
+    if (bits <= 0)
+    {
+        PERROR("Could not get number of bits");
+        return -1;
+    }
+
+    if ( xc_hypercall_bounce_pre(xch, mask) )
+    {
+        PERROR("Could not allocate memory for xc_tbuf_set_cpu_mask hypercall");
+        goto out;
+    }
+
+    sysctl.cmd = XEN_SYSCTL_tbuf_op;
+    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
+    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_cpu_mask;
+
+    set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, mask);
+    sysctl.u.tbuf_op.cpu_mask.nr_bits = bits;
+
+    ret = do_sysctl(xch, &sysctl);
+
+    xc_hypercall_bounce_post(xch, mask);
+
+ out:
+    return ret;
+}
+
+int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask)
+{
+    DECLARE_SYSCTL;
+
+    sysctl.cmd = XEN_SYSCTL_tbuf_op;
+    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
+    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_evt_mask;
+    sysctl.u.tbuf_op.evt_mask = mask;
+
+    return do_sysctl(xch, &sysctl);
+}
+
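
An illustrative caller (not part of this commit; xch assumed), roughly what xentrace does: enable the trace buffers and learn where to map them from; the returned MFN belongs to DOMID_XEN.

    unsigned long tbuf_mfn, tbuf_size;

    if ( xc_tbuf_enable(xch, 32 /* pages */, &tbuf_mfn, &tbuf_size) )
        perror("xc_tbuf_enable");
    /* Map tbuf_mfn from DOMID_XEN for tbuf_size bytes and consume records,
       then call xc_tbuf_disable(xch) when finished. */
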
diff --git a/tools/libs/ctrl/xc_vm_event.c b/tools/libs/ctrl/xc_vm_event.c
new file mode 100644 (file)
index 0000000..a97c615
--- /dev/null
@@ -0,0 +1,183 @@
+/******************************************************************************
+ *
+ * xc_vm_event.c
+ *
+ * Interface to low-level memory event functionality.
+ *
+ * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xc_private.h"
+
+int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
+                        unsigned int mode, uint32_t *port)
+{
+    DECLARE_DOMCTL;
+    int rc;
+
+    domctl.cmd = XEN_DOMCTL_vm_event_op;
+    domctl.domain = domain_id;
+    domctl.u.vm_event_op.op = op;
+    domctl.u.vm_event_op.mode = mode;
+
+    rc = do_domctl(xch, &domctl);
+    if ( !rc && port )
+        *port = domctl.u.vm_event_op.u.enable.port;
+    return rc;
+}
+
+void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
+                         uint32_t *port)
+{
+    void *ring_page = NULL;
+    uint64_t pfn;
+    xen_pfn_t ring_pfn, mmap_pfn;
+    unsigned int op, mode;
+    int rc1, rc2, saved_errno;
+
+    if ( !port )
+    {
+        errno = EINVAL;
+        return NULL;
+    }
+
+    /* Pause the domain for ring page setup */
+    rc1 = xc_domain_pause(xch, domain_id);
+    if ( rc1 != 0 )
+    {
+        PERROR("Unable to pause domain\n");
+        return NULL;
+    }
+
+    /* Get the pfn of the ring page */
+    rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
+    if ( rc1 != 0 )
+    {
+        PERROR("Failed to get pfn of ring page\n");
+        goto out;
+    }
+
+    ring_pfn = pfn;
+    mmap_pfn = pfn;
+    rc1 = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
+    if ( rc1 || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+    {
+        /* Page not in the physmap, try to populate it */
+        rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
+                                              &ring_pfn);
+        if ( rc1 != 0 )
+        {
+            PERROR("Failed to populate ring pfn\n");
+            goto out;
+        }
+    }
+
+    mmap_pfn = ring_pfn;
+    ring_page = xc_map_foreign_pages(xch, domain_id, PROT_READ | PROT_WRITE,
+                                         &mmap_pfn, 1);
+    if ( !ring_page )
+    {
+        PERROR("Could not map the ring page\n");
+        goto out;
+    }
+
+    switch ( param )
+    {
+    case HVM_PARAM_PAGING_RING_PFN:
+        op = XEN_VM_EVENT_ENABLE;
+        mode = XEN_DOMCTL_VM_EVENT_OP_PAGING;
+        break;
+
+    case HVM_PARAM_MONITOR_RING_PFN:
+        op = XEN_VM_EVENT_ENABLE;
+        mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
+        break;
+
+    case HVM_PARAM_SHARING_RING_PFN:
+        op = XEN_VM_EVENT_ENABLE;
+        mode = XEN_DOMCTL_VM_EVENT_OP_SHARING;
+        break;
+
+    /*
+     * This covers the case where the HVM_PARAM is valid in general but is
+     * not one of the vm_event ring parameters.
+     */
+    default:
+        errno = EINVAL;
+        rc1 = -1;
+        goto out;
+    }
+
+    rc1 = xc_vm_event_control(xch, domain_id, op, mode, port);
+    if ( rc1 != 0 )
+    {
+        PERROR("Failed to enable vm_event\n");
+        goto out;
+    }
+
+    /* Remove the ring_pfn from the guest's physmap */
+    rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
+    if ( rc1 != 0 )
+        PERROR("Failed to remove ring page from guest physmap");
+
+ out:
+    saved_errno = errno;
+
+    rc2 = xc_domain_unpause(xch, domain_id);
+    if ( rc1 != 0 || rc2 != 0 )
+    {
+        if ( rc2 != 0 )
+        {
+            if ( rc1 == 0 )
+                saved_errno = errno;
+            PERROR("Unable to unpause domain");
+        }
+
+        if ( ring_page )
+            xenforeignmemory_unmap(xch->fmem, ring_page, 1);
+        ring_page = NULL;
+
+        errno = saved_errno;
+    }
+
+    return ring_page;
+}
+
+int xc_vm_event_get_version(xc_interface *xch)
+{
+    DECLARE_DOMCTL;
+    int rc;
+
+    domctl.cmd = XEN_DOMCTL_vm_event_op;
+    domctl.domain = DOMID_INVALID;
+    domctl.u.vm_event_op.op = XEN_VM_EVENT_GET_VERSION;
+    domctl.u.vm_event_op.mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
+
+    rc = do_domctl(xch, &domctl);
+    if ( !rc )
+        rc = domctl.u.vm_event_op.u.version;
+    return rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
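
A sketch of how an in-library caller (e.g. the monitor code in xc_monitor.c) drives this (illustrative only, not part of this commit; xch and domain_id assumed): enable the monitor ring, then bind the returned event channel port and consume requests from the mapped page.

    uint32_t port;
    void *ring = xc_vm_event_enable(xch, domain_id,
                                    HVM_PARAM_MONITOR_RING_PFN, &port);

    if ( !ring )
        perror("xc_vm_event_enable");
    /* Bind 'port' via libxenevtchn, initialise the ring with the vm_event
       macros, and unmap 'ring' on teardown. */
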
index db73fe314e40e26c0cb8ebbe50ca7813323a1bb7..8e45e8d9177ada247fbb46cdf50be4b15aa22667 100644 (file)
@@ -16,3 +16,5 @@ LIBS_LIBS += devicemodel
 USELIBS_devicemodel := toollog toolcore call
 LIBS_LIBS += hypfs
 USELIBS_hypfs := toollog toolcore call
+LIBS_LIBS += ctrl
+USELIBS_ctrl := toollog call evtchn gnttab foreignmemory devicemodel
index faf2639894df3c7ec792864347397a9f408a68ff..44fa0488c92c450badfe66e651a8058064e51ffe 100644 (file)
@@ -9,47 +9,10 @@ ifeq ($(CONFIG_LIBXC_MINIOS),y)
 override CONFIG_MIGRATE := n
 endif
 
-CTRL_SRCS-y       :=
-CTRL_SRCS-y       += xc_altp2m.c
-CTRL_SRCS-y       += xc_core.c
-CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
-CTRL_SRCS-$(CONFIG_ARM) += xc_core_arm.c
-CTRL_SRCS-y       += xc_cpupool.c
-CTRL_SRCS-y       += xc_domain.c
-CTRL_SRCS-y       += xc_evtchn.c
-CTRL_SRCS-y       += xc_gnttab.c
-CTRL_SRCS-y       += xc_misc.c
-CTRL_SRCS-y       += xc_flask.c
-CTRL_SRCS-y       += xc_physdev.c
-CTRL_SRCS-y       += xc_private.c
-CTRL_SRCS-y       += xc_csched.c
-CTRL_SRCS-y       += xc_csched2.c
-CTRL_SRCS-y       += xc_arinc653.c
-CTRL_SRCS-y       += xc_rt.c
-CTRL_SRCS-y       += xc_tbuf.c
-CTRL_SRCS-y       += xc_pm.c
-CTRL_SRCS-y       += xc_cpu_hotplug.c
-CTRL_SRCS-y       += xc_resume.c
-CTRL_SRCS-y       += xc_vm_event.c
-CTRL_SRCS-y       += xc_monitor.c
-CTRL_SRCS-y       += xc_mem_paging.c
-CTRL_SRCS-y       += xc_mem_access.c
-CTRL_SRCS-y       += xc_memshr.c
-CTRL_SRCS-y       += xc_hcall_buf.c
-CTRL_SRCS-y       += xc_foreign_memory.c
-CTRL_SRCS-y       += xc_kexec.c
-CTRL_SRCS-y       += xc_resource.c
-CTRL_SRCS-$(CONFIG_X86) += xc_psr.c
-CTRL_SRCS-$(CONFIG_X86) += xc_pagetab.c
-CTRL_SRCS-$(CONFIG_Linux) += xc_linux.c
-CTRL_SRCS-$(CONFIG_FreeBSD) += xc_freebsd.c
-CTRL_SRCS-$(CONFIG_SunOS) += xc_solaris.c
-CTRL_SRCS-$(CONFIG_NetBSD) += xc_netbsd.c
-CTRL_SRCS-$(CONFIG_NetBSDRump) += xc_netbsd.c
-CTRL_SRCS-$(CONFIG_MiniOS) += xc_minios.c
-CTRL_SRCS-y       += xc_evtchn_compat.c
-CTRL_SRCS-y       += xc_gnttab_compat.c
-CTRL_SRCS-y       += xc_devicemodel_compat.c
+LINK_FILES := xc_private.h xc_core.h xc_core_x86.h xc_core_arm.h xc_bitops.h
+
+$(LINK_FILES):
+       ln -sf $(XEN_ROOT)/tools/libs/ctrl/$(notdir $@) $@
 
 GUEST_SRCS-y :=
 GUEST_SRCS-y += xg_private.c
@@ -124,26 +87,14 @@ CFLAGS     += $(CFLAGS_libxentoollog)
 CFLAGS += $(CFLAGS_libxenevtchn)
 CFLAGS += $(CFLAGS_libxendevicemodel)
 
-CTRL_LIB_OBJS := $(patsubst %.c,%.o,$(CTRL_SRCS-y))
-CTRL_PIC_OBJS := $(patsubst %.c,%.opic,$(CTRL_SRCS-y))
-
 GUEST_LIB_OBJS := $(patsubst %.c,%.o,$(GUEST_SRCS-y))
 GUEST_PIC_OBJS := $(patsubst %.c,%.opic,$(GUEST_SRCS-y))
 
-$(CTRL_LIB_OBJS) $(GUEST_LIB_OBJS) \
-$(CTRL_PIC_OBJS) $(GUEST_PIC_OBJS): CFLAGS += -include $(XEN_ROOT)/tools/config.h
+$(GUEST_LIB_OBJS) $(GUEST_PIC_OBJS): CFLAGS += -include $(XEN_ROOT)/tools/config.h
 
 # libxenguest includes xc_private.h, so needs this despite not using
 # this functionality directly.
-$(CTRL_LIB_OBJS) $(GUEST_LIB_OBJS) \
-$(CTRL_PIC_OBJS) $(GUEST_PIC_OBJS): CFLAGS += $(CFLAGS_libxencall) $(CFLAGS_libxenforeignmemory)
-
-$(CTRL_LIB_OBJS) $(CTRL_PIC_OBJS): CFLAGS += $(CFLAGS_libxengnttab)
-
-LIB := libxenctrl.a
-ifneq ($(nosharedlibs),y)
-LIB += libxenctrl.so libxenctrl.so.$(MAJOR) libxenctrl.so.$(MAJOR).$(MINOR)
-endif
+$(GUEST_LIB_OBJS) $(GUEST_PIC_OBJS): CFLAGS += $(CFLAGS_libxencall) $(CFLAGS_libxenforeignmemory)
 
 LIB += libxenguest.a
 ifneq ($(nosharedlibs),y)
@@ -155,28 +106,17 @@ $(eval $(genpath-target))
 
 xc_private.h: _paths.h
 
-$(CTRL_LIB_OBJS) $(GUEST_LIB_OBJS) \
-$(CTRL_PIC_OBJS) $(GUEST_PIC_OBJS): xc_private.h
+$(GUEST_LIB_OBJS) $(GUEST_PIC_OBJS): $(LINK_FILES)
 
-PKG_CONFIG := xencontrol.pc xenguest.pc
+PKG_CONFIG := xenguest.pc
 PKG_CONFIG_VERSION := $(MAJOR).$(MINOR)
 
-xencontrol.pc: PKG_CONFIG_NAME = Xencontrol
-xencontrol.pc: PKG_CONFIG_DESC = The Xencontrol library for Xen hypervisor
-xencontrol.pc: PKG_CONFIG_USELIBS = $(SHLIB_libxenctrl)
-xencontrol.pc: PKG_CONFIG_LIB = xenctrl
-xencontrol.pc: PKG_CONFIG_REQPRIV = xenevtchn,xengnttab,xencall,xenforeignmemory,xendevicemodel,xentoollog
 xenguest.pc: PKG_CONFIG_NAME = Xenguest
 xenguest.pc: PKG_CONFIG_DESC = The Xenguest library for Xen hypervisor
 xenguest.pc: PKG_CONFIG_USELIBS = $(SHLIB_libxenguest)
 xenguest.pc: PKG_CONFIG_LIB = xenguest
 xenguest.pc: PKG_CONFIG_REQPRIV = xentoollog,xencall,xenforeignmemory,xenevtchn
 
-$(PKG_CONFIG_DIR)/xencontrol.pc: PKG_CONFIG_NAME = Xencontrol
-$(PKG_CONFIG_DIR)/xencontrol.pc: PKG_CONFIG_DESC = The Xencontrol library for Xen hypervisor
-$(PKG_CONFIG_DIR)/xencontrol.pc: PKG_CONFIG_USELIBS = $(SHLIB_libxenctrl)
-$(PKG_CONFIG_DIR)/xencontrol.pc: PKG_CONFIG_LIB = xenctrl
-$(PKG_CONFIG_DIR)/xencontrol.pc: PKG_CONFIG_REQPRIV = xenevtchn,xengnttab,xencall,xenforeignmemory,xendevicemodel,xentoollog
 $(PKG_CONFIG_DIR)/xenguest.pc: PKG_CONFIG_NAME = Xenguest
 $(PKG_CONFIG_DIR)/xenguest.pc: PKG_CONFIG_DESC = The Xenguest library for Xen hypervisor
 $(PKG_CONFIG_DIR)/xenguest.pc: PKG_CONFIG_USELIBS = $(SHLIB_libxenguest)
@@ -211,17 +151,11 @@ libs: $(LIB) $(PKG_CONFIG_INST) $(PKG_CONFIG_LOCAL)
 install: build
        $(INSTALL_DIR) $(DESTDIR)$(libdir)
        $(INSTALL_DIR) $(DESTDIR)$(includedir)
-       $(INSTALL_SHLIB) libxenctrl.so.$(MAJOR).$(MINOR) $(DESTDIR)$(libdir)
-       $(INSTALL_DATA) libxenctrl.a $(DESTDIR)$(libdir)
-       $(SYMLINK_SHLIB) libxenctrl.so.$(MAJOR).$(MINOR) $(DESTDIR)$(libdir)/libxenctrl.so.$(MAJOR)
-       $(SYMLINK_SHLIB) libxenctrl.so.$(MAJOR) $(DESTDIR)$(libdir)/libxenctrl.so
-       $(INSTALL_DATA) include/xenctrl.h include/xenctrl_compat.h include/xenctrl_dom.h $(DESTDIR)$(includedir)
        $(INSTALL_SHLIB) libxenguest.so.$(MAJOR).$(MINOR) $(DESTDIR)$(libdir)
        $(INSTALL_DATA) libxenguest.a $(DESTDIR)$(libdir)
        $(SYMLINK_SHLIB) libxenguest.so.$(MAJOR).$(MINOR) $(DESTDIR)$(libdir)/libxenguest.so.$(MAJOR)
        $(SYMLINK_SHLIB) libxenguest.so.$(MAJOR) $(DESTDIR)$(libdir)/libxenguest.so
        $(INSTALL_DATA) include/xenguest.h $(DESTDIR)$(includedir)
-       $(INSTALL_DATA) xencontrol.pc $(DESTDIR)$(PKG_INSTALLDIR)
        $(INSTALL_DATA) xenguest.pc $(DESTDIR)$(PKG_INSTALLDIR)
 
 .PHONY: uninstall
@@ -232,14 +166,6 @@ uninstall:
        rm -f $(DESTDIR)$(libdir)/libxenguest.so.$(MAJOR)
        rm -f $(DESTDIR)$(libdir)/libxenguest.so.$(MAJOR).$(MINOR)
        rm -f $(DESTDIR)$(libdir)/libxenguest.a
-       rm -f $(DESTDIR)$(PKG_INSTALLDIR)/xencontrol.pc
-       rm -f $(DESTDIR)$(includedir)/xenctrl.h
-       rm -f $(DESTDIR)$(includedir)/xenctrl_compat.h
-       rm -f $(DESTDIR)$(includedir)/xenctrl_dom.h
-       rm -f $(DESTDIR)$(libdir)/libxenctrl.so
-       rm -f $(DESTDIR)$(libdir)/libxenctrl.so.$(MAJOR)
-       rm -f $(DESTDIR)$(libdir)/libxenctrl.so.$(MAJOR).$(MINOR)
-       rm -f $(DESTDIR)$(libdir)/libxenctrl.a
 
 .PHONY: TAGS
 TAGS:
@@ -249,8 +175,8 @@ TAGS:
 clean:
        rm -rf *.rpm $(LIB) *~ $(DEPS_RM) \
             _paths.h \
-           xencontrol.pc xenguest.pc \
-            $(CTRL_LIB_OBJS) $(CTRL_PIC_OBJS) \
+           $(LINK_FILES) \
+           xenguest.pc \
             $(GUEST_LIB_OBJS) $(GUEST_PIC_OBJS)
 
 .PHONY: distclean
@@ -266,19 +192,6 @@ rpm: build
        mv staging/i386/*.rpm .
        rm -rf staging
 
-# libxenctrl
-
-libxenctrl.a: $(CTRL_LIB_OBJS)
-       $(AR) rc $@ $^
-
-libxenctrl.so: libxenctrl.so.$(MAJOR)
-       $(SYMLINK_SHLIB) $< $@
-libxenctrl.so.$(MAJOR): libxenctrl.so.$(MAJOR).$(MINOR)
-       $(SYMLINK_SHLIB) $< $@
-
-libxenctrl.so.$(MAJOR).$(MINOR): $(CTRL_PIC_OBJS)
-       $(CC) $(LDFLAGS) $(PTHREAD_LDFLAGS) -Wl,$(SONAME_LDFLAG) -Wl,libxenctrl.so.$(MAJOR) $(SHLIB_LDFLAGS) -o $@ $^ $(LDLIBS_libxentoollog) $(LDLIBS_libxenevtchn) $(LDLIBS_libxengnttab) $(LDLIBS_libxencall) $(LDLIBS_libxenforeignmemory) $(LDLIBS_libxendevicemodel) $(PTHREAD_LIBS) $(APPEND_LDFLAGS)
-
 # libxenguest
 
 libxenguest.a: $(GUEST_LIB_OBJS)
@@ -299,7 +212,7 @@ xc_dom_bzimageloader.o: CFLAGS += $(filter -D%,$(zlib-options))
 xc_dom_bzimageloader.opic: CFLAGS += $(filter -D%,$(zlib-options))
 
 libxenguest.so.$(MAJOR).$(MINOR): COMPRESSION_LIBS = $(filter -l%,$(zlib-options))
-libxenguest.so.$(MAJOR).$(MINOR): $(GUEST_PIC_OBJS) libxenctrl.so
+libxenguest.so.$(MAJOR).$(MINOR): $(GUEST_PIC_OBJS)
        $(CC) $(LDFLAGS) -Wl,$(SONAME_LDFLAG) -Wl,libxenguest.so.$(MAJOR) $(SHLIB_LDFLAGS) -o $@ $(GUEST_PIC_OBJS) $(COMPRESSION_LIBS) -lz $(LDLIBS_libxenevtchn) $(LDLIBS_libxenctrl) $(PTHREAD_LIBS) $(APPEND_LDFLAGS)
 
 -include $(DEPS_INCLUDE)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
deleted file mode 100644 (file)
index 4c89b72..0000000
+++ /dev/null
@@ -1,2668 +0,0 @@
-/******************************************************************************
- * xenctrl.h
- *
- * A library for low-level access to the Xen control interfaces.
- *
- * Copyright (c) 2003-2004, K A Fraser.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef XENCTRL_H
-#define XENCTRL_H
-
-/* Tell the Xen public headers we are a user-space tools build. */
-#ifndef __XEN_TOOLS__
-#define __XEN_TOOLS__ 1
-#endif
-
-#include <unistd.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <xen/xen.h>
-#include <xen/domctl.h>
-#include <xen/physdev.h>
-#include <xen/sysctl.h>
-#include <xen/version.h>
-#include <xen/event_channel.h>
-#include <xen/sched.h>
-#include <xen/memory.h>
-#include <xen/grant_table.h>
-#include <xen/hvm/dm_op.h>
-#include <xen/hvm/params.h>
-#include <xen/xsm/flask_op.h>
-#include <xen/kexec.h>
-#include <xen/platform.h>
-
-#include "xentoollog.h"
-
-#if defined(__i386__) || defined(__x86_64__)
-#include <xen/foreign/x86_32.h>
-#include <xen/foreign/x86_64.h>
-#include <xen/arch-x86/xen-mca.h>
-#endif
-
-#define XC_PAGE_SHIFT           12
-#define XC_PAGE_SIZE            (1UL << XC_PAGE_SHIFT)
-#define XC_PAGE_MASK            (~(XC_PAGE_SIZE-1))
-
-#define INVALID_MFN  (~0UL)
-
-/*
- *  DEFINITIONS FOR CPU BARRIERS
- */
-
-#define xen_barrier() asm volatile ( "" : : : "memory")
-
-#if defined(__i386__)
-#define xen_mb()  asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
-#define xen_rmb() xen_barrier()
-#define xen_wmb() xen_barrier()
-#elif defined(__x86_64__)
-#define xen_mb()  asm volatile ( "mfence" : : : "memory")
-#define xen_rmb() xen_barrier()
-#define xen_wmb() xen_barrier()
-#elif defined(__arm__)
-#define xen_mb()   asm volatile ("dmb" : : : "memory")
-#define xen_rmb()  asm volatile ("dmb" : : : "memory")
-#define xen_wmb()  asm volatile ("dmb" : : : "memory")
-#elif defined(__aarch64__)
-#define xen_mb()   asm volatile ("dmb sy" : : : "memory")
-#define xen_rmb()  asm volatile ("dmb sy" : : : "memory")
-#define xen_wmb()  asm volatile ("dmb sy" : : : "memory")
-#else
-#error "Define barriers"
-#endif
-
-
-#define XENCTRL_HAS_XC_INTERFACE 1
-/* In Xen 4.0 and earlier, xc_interface_open and xc_evtchn_open would
- * both return ints being the file descriptor.  In 4.1 and later, they
- * return an xc_interface* and xc_evtchn*, respectively - ie, a
- * pointer to an opaque struct.  This #define is provided in 4.1 and
- * later, allowing out-of-tree callers to more easily distinguish
- * between, and be compatible with, both versions.
- */
-
-
-/*
- *  GENERAL
- *
- * Unless otherwise specified, each function here returns zero or a
- * non-null pointer on success; or in case of failure, sets errno and
- * returns -1 or a null pointer.
- *
- * Unless otherwise specified, errors result in a call to the error
- * handler function, which by default prints a message to the
- * FILE* passed as the caller_data, which by default is stderr.
- * (This is described below as "logging errors".)
- *
- * The error handler can safely trash errno, as libxc saves it across
- * the callback.
- */
-
-typedef struct xc_interface_core xc_interface;
-
-enum xc_error_code {
-  XC_ERROR_NONE = 0,
-  XC_INTERNAL_ERROR = 1,
-  XC_INVALID_KERNEL = 2,
-  XC_INVALID_PARAM = 3,
-  XC_OUT_OF_MEMORY = 4,
-  /* new codes need to be added to xc_error_level_to_desc too */
-};
-
-typedef enum xc_error_code xc_error_code;
-
-
-/*
- *  INITIALIZATION FUNCTIONS
- */
-
-/**
- * This function opens a handle to the hypervisor interface.  This function can
- * be called multiple times within a single process.  Multiple processes can
- * have an open hypervisor interface at the same time.
- *
- * Note:
- * After fork, a child process must not use any opened xc interface
- * handle inherited from its parent. It must open a new handle if
- * it wants to interact with xc.
- *
- * Each call to this function should have a corresponding call to
- * xc_interface_close().
- *
- * This function can fail if the caller does not have superuser permission or
- * if a Xen-enabled kernel is not currently running.
- *
- * @return a handle to the hypervisor interface
- */
-xc_interface *xc_interface_open(xentoollog_logger *logger,
-                                xentoollog_logger *dombuild_logger,
-                                unsigned open_flags);
-  /* if logger==NULL, will log to stderr
-   * if dombuild_logger==NULL, will log to a file
-   */
-
-/*
- * Note: if XC_OPENFLAG_NON_REENTRANT is passed then libxc must not be
- * called reentrantly and the calling application is responsible for
- * providing mutual exclusion surrounding all libxc calls itself.
- *
- * In particular xc_{get,clear}_last_error only remain valid for the
- * duration of the critical section containing the call which failed.
- */
-enum xc_open_flags {
-    XC_OPENFLAG_DUMMY =  1<<0, /* do not actually open a xenctrl interface */
-    XC_OPENFLAG_NON_REENTRANT = 1<<1, /* assume library is only ever called from a single thread */
-};
-
-/**
- * This function closes an open hypervisor interface.
- *
- * This function can fail if the handle does not represent an open interface or
- * if there were problems closing the interface.  In the latter case
- * the interface is still closed.
- *
- * @parm xch a handle to an open hypervisor interface
- * @return 0 on success, -1 otherwise.
- */
-int xc_interface_close(xc_interface *xch);
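-
-/*
- * A minimal usage sketch (illustrative only; error handling beyond the
- * NULL check is elided): open a handle with default logging to stderr,
- * use it, then close it again.
- *
- *     xc_interface *xch = xc_interface_open(NULL, NULL, 0);
- *
- *     if ( !xch )
- *         return -1;             errno has been set by xc_interface_open()
- *
- *     ...issue libxc calls using xch...
- *
- *     xc_interface_close(xch);
- */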
-
-/**
- * Return the handles which xch has opened and will use for
- * hypercalls, foreign memory accesses and device model operations.
- * These may be used with the corresponding libraries so long as the
- * xch itself remains open.
- */
-struct xencall_handle *xc_interface_xcall_handle(xc_interface *xch);
-struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch);
-struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch);
-
-/*
- * HYPERCALL SAFE MEMORY BUFFER
- *
- * Ensure that memory which is passed to a hypercall has been
- * specially allocated in order to be safe to access from the
- * hypervisor.
- *
- * Each user data pointer is shadowed by an xc_hypercall_buffer data
- * structure. You should never define an xc_hypercall_buffer type
- * directly, instead use the DECLARE_HYPERCALL_BUFFER* macros below.
- *
- * The structure should be considered opaque and all access should be
- * via the macros and helper functions defined below.
- *
- * Once the buffer is declared the user is responsible for explicitly
- * allocating and releasing the memory using
- * xc_hypercall_buffer_alloc(_pages) and
- * xc_hypercall_buffer_free(_pages).
- *
- * Once the buffer has been allocated the user can initialise the data
- * via the normal pointer. The xc_hypercall_buffer structure is
- * transparently referenced by the helper macros (such as
- * xen_set_guest_handle) in order to check at compile time that the
- * correct type of memory is being used.
- */
-struct xc_hypercall_buffer {
-    /* Hypercall safe memory buffer. */
-    void *hbuf;
-
-    /*
-     * Reference to xc_hypercall_buffer passed as argument to the
-     * current function.
-     */
-    struct xc_hypercall_buffer *param_shadow;
-
-    /*
-     * Direction of copy for bounce buffering.
-     */
-    int dir;
-
-    /* Used iff dir != 0. */
-    void *ubuf;
-    size_t sz;
-};
-typedef struct xc_hypercall_buffer xc_hypercall_buffer_t;
-
-/*
- * Construct the name of the hypercall buffer for a given variable.
- * For internal use only
- */
-#define XC__HYPERCALL_BUFFER_NAME(_name) xc__hypercall_buffer_##_name
-
-/*
- * Returns the hypercall_buffer associated with a variable.
- */
-#define HYPERCALL_BUFFER(_name)                                 \
-    ({  xc_hypercall_buffer_t _hcbuf_buf1;                      \
-        typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_buf2 = \
-                &XC__HYPERCALL_BUFFER_NAME(_name);              \
-        (void)(&_hcbuf_buf1 == _hcbuf_buf2);                    \
-        (_hcbuf_buf2)->param_shadow ?                           \
-                (_hcbuf_buf2)->param_shadow : (_hcbuf_buf2);    \
-     })
-
-#define HYPERCALL_BUFFER_INIT_NO_BOUNCE .dir = 0, .sz = 0, .ubuf = (void *)-1
-
-/*
- * Defines a hypercall buffer and user pointer with _name of _type.
- *
- * The user accesses the data as normal via _name which will be
- * transparently converted to the hypercall buffer as necessary.
- */
-#define DECLARE_HYPERCALL_BUFFER(_type, _name)                 \
-    _type *(_name) = NULL;                                     \
-    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
-        .hbuf = NULL,                                          \
-        .param_shadow = NULL,                                  \
-        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
-    }
-
-/*
- * Like DECLARE_HYPERCALL_BUFFER() but using an already allocated
- * hypercall buffer, _hbuf.
- *
- * Useful when a hypercall buffer is passed to a function and access
- * via the user pointer is required.
- *
- * See DECLARE_HYPERCALL_BUFFER_ARGUMENT() if the user pointer is not
- * required.
- */
-#define DECLARE_HYPERCALL_BUFFER_SHADOW(_type, _name, _hbuf)   \
-    _type *(_name) = (_hbuf)->hbuf;                            \
-    __attribute__((unused))                                    \
-    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
-        .hbuf = (void *)-1,                                    \
-        .param_shadow = (_hbuf),                               \
-        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
-    }
-
-/*
- * Declare the necessary data structure to allow a hypercall buffer
- * passed as an argument to a function to be used in the normal way.
- */
-#define DECLARE_HYPERCALL_BUFFER_ARGUMENT(_name)               \
-    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
-        .hbuf = (void *)-1,                                    \
-        .param_shadow = (_name),                               \
-        HYPERCALL_BUFFER_INIT_NO_BOUNCE                        \
-    }
-
-/*
- * Get the hypercall buffer data pointer in a form suitable for use
- * directly as a hypercall argument.
- */
-#define HYPERCALL_BUFFER_AS_ARG(_name)                          \
-    ({  xc_hypercall_buffer_t _hcbuf_arg1;                      \
-        typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_hcbuf_arg2 = \
-                HYPERCALL_BUFFER(_name);                        \
-        (void)(&_hcbuf_arg1 == _hcbuf_arg2);                    \
-        (unsigned long)(_hcbuf_arg2)->hbuf;                     \
-     })
-
-/*
- * Set a xen_guest_handle in a type safe manner, ensuring that the
- * data pointer has been correctly allocated.
- */
-#define set_xen_guest_handle_impl(_hnd, _val, _byte_off)        \
-    do {                                                        \
-        xc_hypercall_buffer_t _hcbuf_hnd1;                      \
-        typeof(XC__HYPERCALL_BUFFER_NAME(_val)) *_hcbuf_hnd2 =  \
-                HYPERCALL_BUFFER(_val);                         \
-        (void) (&_hcbuf_hnd1 == _hcbuf_hnd2);                   \
-        set_xen_guest_handle_raw(_hnd,                          \
-                (_hcbuf_hnd2)->hbuf + (_byte_off));             \
-    } while (0)
-
-#undef set_xen_guest_handle
-#define set_xen_guest_handle(_hnd, _val)                        \
-    set_xen_guest_handle_impl(_hnd, _val, 0)
-
-#define set_xen_guest_handle_offset(_hnd, _val, _off)           \
-    set_xen_guest_handle_impl(_hnd, _val,                       \
-            ((sizeof(*_val)*(_off))))
-
-/* Use with set_xen_guest_handle in place of NULL */
-extern xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL);
-
-/*
- * Allocate and free hypercall buffers with byte granularity.
- */
-void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size);
-#define xc_hypercall_buffer_alloc(_xch, _name, _size) xc__hypercall_buffer_alloc(_xch, HYPERCALL_BUFFER(_name), _size)
-void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b);
-#define xc_hypercall_buffer_free(_xch, _name) xc__hypercall_buffer_free(_xch, HYPERCALL_BUFFER(_name))
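-
-/*
- * Illustrative sketch of the intended allocation pattern (error handling
- * elided; 'xch', 'size' and 'hnd' are assumed to be in scope, 'hnd'
- * standing for a guest handle field of some hypercall argument
- * structure): declare the buffer, allocate it, fill it via the normal
- * pointer, point the guest handle at it, then free it.
- *
- *     DECLARE_HYPERCALL_BUFFER(uint8_t, buf);
- *
- *     buf = xc_hypercall_buffer_alloc(xch, buf, size);
- *     memset(buf, 0, size);               use via the normal pointer
- *     set_xen_guest_handle(hnd, buf);     type-checked handle setup
- *     ...issue the hypercall...
- *     xc_hypercall_buffer_free(xch, buf);
- */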
-
-/*
- * Allocate and free hypercall buffers with page alignment.
- */
-void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
-#define xc_hypercall_buffer_alloc_pages(_xch, _name, _nr) xc__hypercall_buffer_alloc_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
-void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
-#define xc_hypercall_buffer_free_pages(_xch, _name, _nr)                    \
-    do {                                                                    \
-        if ( _name )                                                        \
-            xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name),  \
-                                            _nr);                           \
-    } while (0)
-
-/*
- * Array of hypercall buffers.
- *
- * Create an array with xc_hypercall_buffer_array_create() and
- * populate it by declaring one hypercall buffer in a loop and
- * allocating the buffer with xc_hypercall_buffer_array_alloc().
- *
- * To access a previously allocated buffer, declare a new hypercall
- * buffer and call xc_hypercall_buffer_array_get().
- *
- * Destroy the array with xc_hypercall_buffer_array_destroy() to free
- * the array and all its allocated hypercall buffers.
- */
-struct xc_hypercall_buffer_array;
-typedef struct xc_hypercall_buffer_array xc_hypercall_buffer_array_t;
-
-xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch, unsigned n);
-void *xc__hypercall_buffer_array_alloc(xc_interface *xch, xc_hypercall_buffer_array_t *array,
-                                       unsigned index, xc_hypercall_buffer_t *hbuf, size_t size);
-#define xc_hypercall_buffer_array_alloc(_xch, _array, _index, _name, _size) \
-    xc__hypercall_buffer_array_alloc(_xch, _array, _index, HYPERCALL_BUFFER(_name), _size)
-void *xc__hypercall_buffer_array_get(xc_interface *xch, xc_hypercall_buffer_array_t *array,
-                                     unsigned index, xc_hypercall_buffer_t *hbuf);
-#define xc_hypercall_buffer_array_get(_xch, _array, _index, _name, _size) \
-    xc__hypercall_buffer_array_get(_xch, _array, _index, HYPERCALL_BUFFER(_name))
-void xc_hypercall_buffer_array_destroy(xc_interface *xc, xc_hypercall_buffer_array_t *array);
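-
-/*
- * Illustrative sketch of the pattern described above (error handling
- * elided; 'xch', 'n' and 'size' are assumed to be in scope): create an
- * array of n buffers, allocate each one in a loop, and destroy the
- * array when done.
- *
- *     xc_hypercall_buffer_array_t *array;
- *     unsigned i;
- *
- *     array = xc_hypercall_buffer_array_create(xch, n);
- *     for ( i = 0; i < n; i++ )
- *     {
- *         DECLARE_HYPERCALL_BUFFER(uint8_t, buf);
- *
- *         buf = xc_hypercall_buffer_array_alloc(xch, array, i, buf, size);
- *         ...fill buf...
- *     }
- *     ...issue the hypercall(s)...
- *     xc_hypercall_buffer_array_destroy(xch, array);
- */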
-
-/*
- * CPUMAP handling
- */
-typedef uint8_t *xc_cpumap_t;
-
-/* return maximum number of cpus the hypervisor supports */
-int xc_get_max_cpus(xc_interface *xch);
-
-/* return the number of online cpus */
-int xc_get_online_cpus(xc_interface *xch);
-
-/* return array size for cpumap */
-int xc_get_cpumap_size(xc_interface *xch);
-
-/* allocate a cpumap */
-xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
-
-/* Clear a CPU from the cpumap. */
-void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map);
-
-/* Set a CPU in the cpumap. */
-void xc_cpumap_setcpu(int cpu, xc_cpumap_t map);
-
-/* Test whether the CPU in cpumap is set. */
-int xc_cpumap_testcpu(int cpu, xc_cpumap_t map);
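-
-/*
- * Illustrative sketch (error handling elided): allocate a cpumap, mark a
- * couple of CPUs in it, test one of them, and release it again.  The map
- * is a plain byte array of xc_get_cpumap_size() bytes, so plain free()
- * is assumed to be sufficient here.
- *
- *     xc_cpumap_t map = xc_cpumap_alloc(xch);
- *
- *     xc_cpumap_setcpu(0, map);
- *     xc_cpumap_setcpu(2, map);
- *     if ( xc_cpumap_testcpu(2, map) )
- *         ...CPU 2 is set...
- *     xc_cpumap_clearcpu(2, map);
- *     free(map);
- */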
-
-/*
- * NODEMAP handling
- */
-typedef uint8_t *xc_nodemap_t;
-
-/* return maximum number of NUMA nodes the hypervisor supports */
-int xc_get_max_nodes(xc_interface *xch);
-
-/* return array size for nodemap */
-int xc_get_nodemap_size(xc_interface *xch);
-
-/* allocate a nodemap */
-xc_nodemap_t xc_nodemap_alloc(xc_interface *xch);
-
-/*
- * DOMAIN DEBUGGING FUNCTIONS
- */
-
-typedef struct xc_core_header {
-    unsigned int xch_magic;
-    unsigned int xch_nr_vcpus;
-    unsigned int xch_nr_pages;
-    unsigned int xch_ctxt_offset;
-    unsigned int xch_index_offset;
-    unsigned int xch_pages_offset;
-} xc_core_header_t;
-
-#define XC_CORE_MAGIC     0xF00FEBED
-#define XC_CORE_MAGIC_HVM 0xF00FEBEE
-
-/*
- * DOMAIN MANAGEMENT FUNCTIONS
- */
-
-typedef struct xc_dominfo {
-    uint32_t      domid;
-    uint32_t      ssidref;
-    unsigned int  dying:1, crashed:1, shutdown:1,
-                  paused:1, blocked:1, running:1,
-                  hvm:1, debugged:1, xenstore:1, hap:1;
-    unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
-    unsigned long nr_pages; /* current number, not maximum */
-    unsigned long nr_outstanding_pages;
-    unsigned long nr_shared_pages;
-    unsigned long nr_paged_pages;
-    unsigned long shared_info_frame;
-    uint64_t      cpu_time;
-    unsigned long max_memkb;
-    unsigned int  nr_online_vcpus;
-    unsigned int  max_vcpu_id;
-    xen_domain_handle_t handle;
-    unsigned int  cpupool;
-    struct xen_arch_domainconfig arch_config;
-} xc_dominfo_t;
-
-typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
-
-typedef union 
-{
-#if defined(__i386__) || defined(__x86_64__)
-    vcpu_guest_context_x86_64_t x64;
-    vcpu_guest_context_x86_32_t x32;   
-#endif
-    vcpu_guest_context_t c;
-} vcpu_guest_context_any_t;
-
-typedef union
-{
-#if defined(__i386__) || defined(__x86_64__)
-    shared_info_x86_64_t x64;
-    shared_info_x86_32_t x32;
-#endif
-    shared_info_t s;
-} shared_info_any_t;
-
-#if defined(__i386__) || defined(__x86_64__)
-typedef union
-{
-    start_info_x86_64_t x64;
-    start_info_x86_32_t x32;
-    start_info_t s;
-} start_info_any_t;
-#endif
-
-typedef struct xc_vcpu_extstate {
-    uint64_t xfeature_mask;
-    uint64_t size;
-    void *buffer;
-} xc_vcpu_extstate_t;
-
-int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
-                     struct xen_domctl_createdomain *config);
-
-
-/* Functions to produce a dump of a given domain
- *  xc_domain_dumpcore - produces a dump to a specified file
- *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
- *                                    callback function
- */
-int xc_domain_dumpcore(xc_interface *xch,
-                       uint32_t domid,
-                       const char *corename);
-
-/* Define the callback function type for xc_domain_dumpcore_via_callback.
- *
- * This function is called by the coredump code for every "write";
- * it is passed an opaque object, created by the caller of
- * xc_domain_dumpcore_via_callback, for the function's own use.
- */
-typedef int (dumpcore_rtn_t)(xc_interface *xch,
-                             void *arg, char *buffer, unsigned int length);
-
-int xc_domain_dumpcore_via_callback(xc_interface *xch,
-                                    uint32_t domid,
-                                    void *arg,
-                                    dumpcore_rtn_t dump_rtn);
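-
-/*
- * Illustrative sketch of a dump callback (assumptions: the callback
- * returns 0 on success and a negative value on error, and 'arg' is used
- * here to carry an open file descriptor; short writes are treated as
- * errors for brevity):
- *
- *     static int dump_to_fd(xc_interface *xch, void *arg,
- *                           char *buffer, unsigned int length)
- *     {
- *         int fd = *(int *)arg;
- *
- *         return write(fd, buffer, length) == (ssize_t)length ? 0 : -1;
- *     }
- *
- *     ...
- *     rc = xc_domain_dumpcore_via_callback(xch, domid, &fd, dump_to_fd);
- */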
-
-/*
- * This function sets the maximum number of vcpus that a domain may create.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domid the domain id in which vcpus are to be created.
- * @parm max the maximum number of vcpus that the domain may create.
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_max_vcpus(xc_interface *xch,
-                        uint32_t domid,
-                        unsigned int max);
-
-/**
- * This function pauses a domain. A paused domain still exists in memory
- * however it does not receive any timeslices from the hypervisor.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to pause
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_pause(xc_interface *xch,
-                    uint32_t domid);
-/**
- * This function unpauses a domain.  The domain should have been previously
- * paused.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to unpause
- * return 0 on success, -1 on failure
- */
-int xc_domain_unpause(xc_interface *xch,
-                      uint32_t domid);
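-
-/*
- * Illustrative sketch (error handling elided): pause a domain, inspect
- * or modify it while none of its vcpus can run, then let it continue.
- *
- *     if ( xc_domain_pause(xch, domid) == 0 )
- *     {
- *         ...examine the paused domain...
- *         xc_domain_unpause(xch, domid);
- *     }
- */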
-
-/**
- * This function will destroy a domain.  Destroying a domain removes the domain
- * completely from memory.  This function should be called after sending the
- * domain a SHUTDOWN control message to free up the domain resources.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to destroy
- * @return 0 on success, -1 on failure
- */
-int xc_domain_destroy(xc_interface *xch,
-                      uint32_t domid);
-
-
-/**
- * This function resumes a suspended domain. The domain should have
- * been previously suspended.
- *
- * Note that there is no plain 'xc_domain_suspend', as suspending a
- * domain is quite an involved process.
- *
- * For the purpose of this explanation there are three kinds of guest:
- * PV (using hypercalls for privileged operations), HVM
- * (fully hardware virtualized guests using emulated devices for everything),
- * and PVHVM (PV aware with hardware virtualisation).
- *
- * HVM guests are the simplest - they suspend via S3 / S4 and resume from
- * S3 / S4. Upon resume they have to re-negotiate with the emulated devices.
- *
- * PV and PVHVM communicate via hypercalls for suspend (and resume).
- * For suspend the toolstack initiates the process by writing the string
- * "suspend" to the XenBus node "control/shutdown".
- *
- * The PV guest stashes anything it deems necessary in 'struct
- * start_info' in case of failure (PVHVM may ignore this) and calls
- * the SCHEDOP_shutdown::SHUTDOWN_suspend hypercall (for PV the MFN of
- * 'struct start_info' is passed as the argument).
- *
- * And then the guest is suspended.
- *
- * Checkpointing, or notifying the guest that the suspend failed or was
- * cancelled (in the checkpoint case), is done by having the
- * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return a non-zero
- * value.
- *
- * The PV and PVHVM resume paths are similar. For PV it is much like
- * boot - figure out where 'struct start_info' is (or, if the suspend
- * was cancelled, i.e. checkpointed, reuse the saved values).
- *
- * From here on they differ in the specifics depending on whether the
- * guest is PV or PVHVM, but overall follow the same path:
- *  - PV: bring up the vCPUs,
- *  - PVHVM: set up the vector callback,
- *  - bring up the vCPU runstates,
- *  - remap the grant tables if checkpointing, or set them up from scratch.
- *
- * If the resume was not a checkpoint (or if the suspend was successful)
- * the guest then sets up the PV timers and the different PV events.
- * Lastly the PV drivers re-negotiate with the backend.
- *
- * This function returns before the guest has started resuming. That is,
- * the guest is in a non-running state and its vCPU context is in the
- * SCHEDOP_shutdown::SHUTDOWN_suspend hypercall return path
- * (for PV and PVHVM). For HVM it is in the QEMU emulated BIOS handling
- * the S3 suspend.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to resume
- * @parm fast use cooperative resume (guest must support this)
- * return 0 on success, -1 on failure
- */
-int xc_domain_resume(xc_interface *xch,
-                    uint32_t domid,
-                    int fast);
-
-/**
- * This function will shutdown a domain. This is intended for use in
- * fully-virtualized domains where this operation is analogous to the
- * sched_op operations in a paravirtualized domain. The caller is
- * expected to give the reason for the shutdown.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to destroy
- * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
- * @return 0 on success, -1 on failure
- */
-int xc_domain_shutdown(xc_interface *xch,
-                       uint32_t domid,
-                       int reason);
-
-int xc_watchdog(xc_interface *xch,
-               uint32_t id,
-               uint32_t timeout);
-
-/**
- * This function explicitly sets the host NUMA nodes the domain will
- * have affinity with.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domid the domain id one wants to set the affinity of.
- * @parm nodemap the map of the affine nodes.
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_node_setaffinity(xc_interface *xch,
-                               uint32_t domid,
-                               xc_nodemap_t nodemap);
-
-/**
- * This function retrieves the host NUMA nodes the domain has
- * affinity with.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domid the domain id one wants to get the node affinity of.
- * @parm nodemap the map of the affine nodes.
- * @return 0 on success, -1 on failure.
- */
-int xc_domain_node_getaffinity(xc_interface *xch,
-                               uint32_t domid,
-                               xc_nodemap_t nodemap);
-
-/**
- * This function specifies the CPU affinity for a vcpu.
- *
- * There are two kinds of affinity. Soft affinity is the set of CPUs on
- * which a vcpu prefers to run; hard affinity is the set of CPUs on which
- * a vcpu is allowed to run.
- * If flags contains XEN_VCPUAFFINITY_SOFT, the soft affinity is set to
- * what cpumap_soft_inout contains. If flags contains XEN_VCPUAFFINITY_HARD,
- * the hard affinity is set to what cpumap_hard_inout contains. Both flags
- * can be set at the same time, in which case both soft and hard affinity are
- * set to what the respective parameter contains.
- *
- * The function also returns the effective hard and/or soft affinity, still
- * via the cpumap_soft_inout and cpumap_hard_inout parameters. Effective
- * affinity is, in case of soft affinity, the intersection of soft affinity,
- * hard affinity and the cpupool's online CPUs for the domain, and is returned
- * in cpumap_soft_inout, if XEN_VCPUAFFINITY_SOFT is set in flags. In case of
- * hard affinity, it is the intersection between hard affinity and the
- * cpupool's online CPUs, and is returned in cpumap_hard_inout, if
- * XEN_VCPUAFFINITY_HARD is set in flags. If both flags are set, both soft
- * and hard affinity are returned in the respective parameter.
- *
- * The effective affinity is reported back because it is what the Xen
- * scheduler will actually use; this lets the caller check whether it
- * matches, or is at least good enough for, their purposes.
- *
- * @param xch a handle to an open hypervisor interface.
- * @param domid the id of the domain to which the vcpu belongs
- * @param vcpu the vcpu id within the domain
- * @param cpumap_hard_inout specifies(/returns) the (effective) hard affinity
- * @param cpumap_soft_inout specifies(/returns) the (effective) soft affinity
- * @param flags what we want to set
- */
-int xc_vcpu_setaffinity(xc_interface *xch,
-                        uint32_t domid,
-                        int vcpu,
-                        xc_cpumap_t cpumap_hard_inout,
-                        xc_cpumap_t cpumap_soft_inout,
-                        uint32_t flags);
-
-/**
- * This function retrieves hard and soft CPU affinity of a vcpu,
- * depending on what flags are set.
- *
- * Soft affinity is returned in cpumap_soft if XEN_VCPUAFFINITY_SOFT is set.
- * Hard affinity is returned in cpumap_hard if XEN_VCPUAFFINITY_HARD is set.
- *
- * @param xch a handle to an open hypervisor interface.
- * @param domid the id of the domain to which the vcpu belongs
- * @param vcpu the vcpu id within the domain
- * @param cpumap_hard is where hard affinity is returned
- * @param cpumap_soft is where soft affinity is returned
- * @param flags what we want to get
- */
-int xc_vcpu_getaffinity(xc_interface *xch,
-                        uint32_t domid,
-                        int vcpu,
-                        xc_cpumap_t cpumap_hard,
-                        xc_cpumap_t cpumap_soft,
-                        uint32_t flags);
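-
-/*
- * Illustrative sketch (error handling elided): pin vcpu 0 of a domain to
- * CPUs 0 and 1 as hard affinity and prefer CPU 0 as soft affinity, then
- * inspect the effective affinities returned in the same maps.
- *
- *     xc_cpumap_t hard = xc_cpumap_alloc(xch);
- *     xc_cpumap_t soft = xc_cpumap_alloc(xch);
- *     int rc;
- *
- *     xc_cpumap_setcpu(0, hard);
- *     xc_cpumap_setcpu(1, hard);
- *     xc_cpumap_setcpu(0, soft);
- *     rc = xc_vcpu_setaffinity(xch, domid, 0, hard, soft,
- *                              XEN_VCPUAFFINITY_HARD |
- *                              XEN_VCPUAFFINITY_SOFT);
- *     ...on success 'hard' and 'soft' hold the effective affinities...
- *     free(hard);
- *     free(soft);
- */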
-
-
-/**
- * This function will return the guest_width (in bytes) for the
- * specified domain.
- *
- * @param xch a handle to an open hypervisor interface.
- * @param domid the domain id one wants the address size width of.
- * @param guest_width where the guest width (address size, in bytes) is returned.
- */
-int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
-                              unsigned int *guest_width);
-
-
-/**
- * This function will return information about one or more domains. It is
- * designed to iterate over the list of domains. If a single domain is
- * requested, this function will return the next domain in the list - if
- * one exists. It is, therefore, important in this case to make sure the
- * domain requested was the one returned.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm first_domid the first domain to enumerate information from.  Domains
- *                   are currently enumerated in order of creation.
- * @parm max_doms the number of elements in info
- * @parm info an array of max_doms size that will contain the information for
- *            the enumerated domains.
- * @return the number of domains enumerated or -1 on error
- */
-int xc_domain_getinfo(xc_interface *xch,
-                      uint32_t first_domid,
-                      unsigned int max_doms,
-                      xc_dominfo_t *info);
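-
-/*
- * Illustrative sketch of the iteration pattern described above (error
- * handling elided): walk all domains one at a time, always checking
- * which domain was actually returned before moving on.
- *
- *     xc_dominfo_t info;
- *     uint32_t next = 0;
- *
- *     while ( xc_domain_getinfo(xch, next, 1, &info) == 1 )
- *     {
- *         ...info describes domain info.domid...
- *         next = info.domid + 1;
- *     }
- */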
-
-
-/**
- * This function will set the execution context for the specified vcpu.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to set the vcpu context for
- * @parm vcpu the vcpu number for the context
- * @parm ctxt pointer to the cpu context with the values to set
- * @return 0 on success, -1 on error
- */
-int xc_vcpu_setcontext(xc_interface *xch,
-                       uint32_t domid,
-                       uint32_t vcpu,
-                       vcpu_guest_context_any_t *ctxt);
-/**
- * This function will return information about one or more domains, using a
- * single hypercall.  The domain information will be stored into the supplied
- * array of xc_domaininfo_t structures.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm first_domain the first domain to enumerate information from.
- *                    Domains are currently enumerated in order of creation.
- * @parm max_domains the number of elements in info
- * @parm info an array of max_domains size that will contain the information for
- *            the enumerated domains.
- * @return the number of domains enumerated or -1 on error
- */
-int xc_domain_getinfolist(xc_interface *xch,
-                          uint32_t first_domain,
-                          unsigned int max_domains,
-                          xc_domaininfo_t *info);
-
-/**
- * This function sets the p2m entry for a broken page
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to which the broken page belongs
- * @parm pfn the pfn number of the broken page
- * @return 0 on success, -1 on failure
- */
-int xc_set_broken_page_p2m(xc_interface *xch,
-                           uint32_t domid,
-                           unsigned long pfn);
-
-/**
- * This function returns information about the context of a hvm domain
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get information from
- * @parm ctxt_buf a pointer to a structure to store the execution context of
- *            the hvm domain
- * @parm size the size of ctxt_buf in bytes
- * @return 0 on success, -1 on failure
- */
-int xc_domain_hvm_getcontext(xc_interface *xch,
-                             uint32_t domid,
-                             uint8_t *ctxt_buf,
-                             uint32_t size);
-
-
-/**
- * This function returns one element of the context of a hvm domain
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get information from
- * @parm typecode which type of element is required
- * @parm instance which instance of the type
- * @parm ctxt_buf a pointer to a structure to store the execution context of
- *            the hvm domain
- * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
- * @return 0 on success, -1 on failure
- */
-int xc_domain_hvm_getcontext_partial(xc_interface *xch,
-                                     uint32_t domid,
-                                     uint16_t typecode,
-                                     uint16_t instance,
-                                     void *ctxt_buf,
-                                     uint32_t size);
-
-/**
- * This function will set the context for hvm domain
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to set the hvm domain context for
- * @parm hvm_ctxt pointer to the hvm context with the values to set
- * @parm size the size of hvm_ctxt in bytes
- * @return 0 on success, -1 on failure
- */
-int xc_domain_hvm_setcontext(xc_interface *xch,
-                             uint32_t domid,
-                             uint8_t *hvm_ctxt,
-                             uint32_t size);
-
-/**
- * This function will return guest IO ABI protocol
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get IO ABI protocol for
- * @return guest protocol on success, NULL on failure
- */
-const char *xc_domain_get_native_protocol(xc_interface *xch,
-                                          uint32_t domid);
-
-/**
- * This function returns information about the execution context of a
- * particular vcpu of a domain.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get information from
- * @parm vcpu the vcpu number
- * @parm ctxt a pointer to a structure to store the execution context of the
- *            domain
- * @return 0 on success, -1 on failure
- */
-int xc_vcpu_getcontext(xc_interface *xch,
-                       uint32_t domid,
-                       uint32_t vcpu,
-                       vcpu_guest_context_any_t *ctxt);
-
-/**
- * This function initializes the vuart emulation and returns
- * the event to be used by the backend for communicating with
- * the emulation code.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm type type of vuart
- * @parm domid the domain to get information from
- * @parm console_domid the domid of the backend console
- * @parm gfn the guest pfn to be used as the ring buffer
- * @parm evtchn the event channel to be used for events
- * @return 0 on success, negative error on failure
- */
-int xc_dom_vuart_init(xc_interface *xch,
-                      uint32_t type,
-                      uint32_t domid,
-                      uint32_t console_domid,
-                      xen_pfn_t gfn,
-                      evtchn_port_t *evtchn);
-
-/**
- * This function returns information about the XSAVE state of a particular
- * vcpu of a domain. If extstate->size and extstate->xfeature_mask are 0,
- * the call is considered a query to retrieve them and the buffer is not
- * filled.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to get information from
- * @parm vcpu the vcpu number
- * @parm extstate a pointer to a structure to store the XSAVE state of the
- *                domain
- * @return 0 on success, negative error code on failure
- */
-int xc_vcpu_get_extstate(xc_interface *xch,
-                         uint32_t domid,
-                         uint32_t vcpu,
-                         xc_vcpu_extstate_t *extstate);
-
-typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
-int xc_vcpu_getinfo(xc_interface *xch,
-                    uint32_t domid,
-                    uint32_t vcpu,
-                    xc_vcpuinfo_t *info);
-
-long long xc_domain_get_cpu_usage(xc_interface *xch,
-                                  uint32_t domid,
-                                  int vcpu);
-
-int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
-                        xen_domain_handle_t handle);
-
-typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
-int xc_shadow_control(xc_interface *xch,
-                      uint32_t domid,
-                      unsigned int sop,
-                      xc_hypercall_buffer_t *dirty_bitmap,
-                      unsigned long pages,
-                      unsigned long *mb,
-                      uint32_t mode,
-                      xc_shadow_op_stats_t *stats);
-
-int xc_sched_credit_domain_set(xc_interface *xch,
-                               uint32_t domid,
-                               struct xen_domctl_sched_credit *sdom);
-
-int xc_sched_credit_domain_get(xc_interface *xch,
-                               uint32_t domid,
-                               struct xen_domctl_sched_credit *sdom);
-int xc_sched_credit_params_set(xc_interface *xch,
-                               uint32_t cpupool_id,
-                               struct xen_sysctl_credit_schedule *schedule);
-int xc_sched_credit_params_get(xc_interface *xch,
-                               uint32_t cpupool_id,
-                               struct xen_sysctl_credit_schedule *schedule);
-
-int xc_sched_credit2_params_set(xc_interface *xch,
-                                uint32_t cpupool_id,
-                                struct xen_sysctl_credit2_schedule *schedule);
-int xc_sched_credit2_params_get(xc_interface *xch,
-                                uint32_t cpupool_id,
-                                struct xen_sysctl_credit2_schedule *schedule);
-int xc_sched_credit2_domain_set(xc_interface *xch,
-                                uint32_t domid,
-                                struct xen_domctl_sched_credit2 *sdom);
-int xc_sched_credit2_domain_get(xc_interface *xch,
-                                uint32_t domid,
-                                struct xen_domctl_sched_credit2 *sdom);
-
-int xc_sched_rtds_domain_set(xc_interface *xch,
-                             uint32_t domid,
-                             struct xen_domctl_sched_rtds *sdom);
-int xc_sched_rtds_domain_get(xc_interface *xch,
-                             uint32_t domid,
-                             struct xen_domctl_sched_rtds *sdom);
-int xc_sched_rtds_vcpu_set(xc_interface *xch,
-                           uint32_t domid,
-                           struct xen_domctl_schedparam_vcpu *vcpus,
-                           uint32_t num_vcpus);
-int xc_sched_rtds_vcpu_get(xc_interface *xch,
-                           uint32_t domid,
-                           struct xen_domctl_schedparam_vcpu *vcpus,
-                           uint32_t num_vcpus);
-
-int
-xc_sched_arinc653_schedule_set(
-    xc_interface *xch,
-    uint32_t cpupool_id,
-    struct xen_sysctl_arinc653_schedule *schedule);
-
-int
-xc_sched_arinc653_schedule_get(
-    xc_interface *xch,
-    uint32_t cpupool_id,
-    struct xen_sysctl_arinc653_schedule *schedule);
-
-/**
- * This function sends a trigger to a domain.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to send trigger
- * @parm trigger the trigger type
- * @parm vcpu the vcpu number to send trigger 
- * return 0 on success, -1 on failure
- */
-int xc_domain_send_trigger(xc_interface *xch,
-                           uint32_t domid,
-                           uint32_t trigger,
-                           uint32_t vcpu);
-
-/**
- * This function enables or disables debugging of a domain.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to enable or disable debugging for
- * @parm enable true to enable debugging
- * return 0 on success, -1 on failure
- */
-int xc_domain_setdebugging(xc_interface *xch,
-                           uint32_t domid,
-                           unsigned int enable);
-
-/**
- * This function audits the (top level) p2m of a domain 
- * and returns the different error counts, if any.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id whose top level p2m we 
- *       want to audit
- * @parm orphans count of m2p entries for valid
- *       domain pages containing an invalid value
- * @parm m2p_bad count of m2p entries mismatching the
- *       associated p2m entry for this domain
- * @parm p2m_bad count of p2m entries for this domain
- *       mismatching the associated m2p entry
- * return 0 on success, -1 on failure
- * errno values on failure include: 
- *          -ENOSYS: not implemented
- *          -EFAULT: could not copy results back to guest
- */
-int xc_domain_p2m_audit(xc_interface *xch,
-                        uint32_t domid,
-                        uint64_t *orphans,
-                        uint64_t *m2p_bad,   
-                        uint64_t *p2m_bad);
-
-/**
- * This function sets or clears the requirement that an access memory
- * event listener is required on the domain.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id to set the requirement for
- * @parm required true to require a listener
- * return 0 on success, -1 on failure
- */
-int xc_domain_set_access_required(xc_interface *xch,
-                                 uint32_t domid,
-                                 unsigned int required);
-/**
- * This function sets the handler of global VIRQs sent by the hypervisor
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain id which will handle the VIRQ
- * @parm virq the virq number (VIRQ_*)
- * return 0 on success, -1 on failure
- */
-int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
-
-/*
- * CPUPOOL MANAGEMENT FUNCTIONS
- */
-
-typedef struct xc_cpupoolinfo {
-    uint32_t cpupool_id;
-    uint32_t sched_id;
-    uint32_t n_dom;
-    xc_cpumap_t cpumap;
-} xc_cpupoolinfo_t;
-
-#define XC_CPUPOOL_POOLID_ANY 0xFFFFFFFF
-
-/**
- * Create a new cpupool.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm ppoolid pointer to the new cpupool id (in/out)
- * @parm sched_id id of scheduler to use for pool
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_create(xc_interface *xch,
-                      uint32_t *ppoolid,
-                      uint32_t sched_id);
-
-/**
- * Destroy a cpupool. Pool must be unused and have no cpu assigned.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid id of the cpupool to destroy
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_destroy(xc_interface *xch,
-                       uint32_t poolid);
-
-/**
- * Get cpupool info. Returns info for up to the specified number of cpupools
- * starting at the given id.
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid lowest id for which info is returned
- * return cpupool info ptr (to be freed via xc_cpupool_infofree)
- */
-xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t poolid);
-
-/**
- * Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm info area to free
- */
-void xc_cpupool_infofree(xc_interface *xch,
-                         xc_cpupoolinfo_t *info);
-
-/**
- * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid id of the cpupool
- * @parm cpu cpu number to add
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_addcpu(xc_interface *xch,
-                      uint32_t poolid,
-                      int cpu);
-
-/**
- * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid id of the cpupool
- * @parm cpu cpu number to remove
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_removecpu(xc_interface *xch,
-                         uint32_t poolid,
-                         int cpu);
-
-/**
- * Move domain to another cpupool.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * @parm poolid id of the destination cpupool
- * @parm domid id of the domain to move
- * return 0 on success, -1 on failure
- */
-int xc_cpupool_movedomain(xc_interface *xch,
-                          uint32_t poolid,
-                          uint32_t domid);
-
-/**
- * Return map of cpus not in any cpupool.
- *
- * @parm xc_handle a handle to an open hypervisor interface
- * return cpumap array on success, NULL else
- */
-xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
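-
-/*
- * Illustrative sketch (error handling elided): create a new cpupool using
- * the credit scheduler, give it the first currently unassigned CPU, and
- * move a domain into it.
- *
- *     uint32_t poolid = XC_CPUPOOL_POOLID_ANY;
- *
- *     if ( xc_cpupool_create(xch, &poolid, XEN_SCHEDULER_CREDIT) == 0 )
- *     {
- *         xc_cpupool_addcpu(xch, poolid, -1);
- *         xc_cpupool_movedomain(xch, poolid, domid);
- *     }
- */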
-
-/*
- * EVENT CHANNEL FUNCTIONS
- *
- * None of these do any logging.
- */
-
-/* A port identifier is guaranteed to fit in 31 bits. */
-typedef int xc_evtchn_port_or_error_t;
-
-/**
- * This function allocates an unbound port.  Ports are named endpoints used for
- * interdomain communication.  This function is most useful in opening a
- * well-known port within a domain to receive events on.
- * 
- * NOTE: If you are allocating a *local* unbound port, you probably want to
- * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
- * ports *only* during domain creation.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm dom the ID of the local domain (the 'allocatee')
- * @parm remote_dom the ID of the domain who will later bind
- * @return allocated port (in @dom) on success, -1 on failure
- */
-xc_evtchn_port_or_error_t
-xc_evtchn_alloc_unbound(xc_interface *xch,
-                        uint32_t dom,
-                        uint32_t remote_dom);
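-
-/*
- * Illustrative sketch (error handling elided): during creation of a new
- * domain, allocate an unbound port in that domain which dom0 will later
- * bind to.
- *
- *     xc_evtchn_port_or_error_t port;
- *
- *     port = xc_evtchn_alloc_unbound(xch, new_domid, 0);
- *     if ( port < 0 )
- *         ...failure, errno is set...
- */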
-
-int xc_evtchn_reset(xc_interface *xch,
-                    uint32_t dom);
-
-typedef struct evtchn_status xc_evtchn_status_t;
-int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status);
-
-
-
-int xc_physdev_pci_access_modify(xc_interface *xch,
-                                 uint32_t domid,
-                                 int bus,
-                                 int dev,
-                                 int func,
-                                 int enable);
-
-int xc_readconsolering(xc_interface *xch,
-                       char *buffer,
-                       unsigned int *pnr_chars,
-                       int clear, int incremental, uint32_t *pindex);
-
-int xc_send_debug_keys(xc_interface *xch, const char *keys);
-
-typedef struct xen_sysctl_physinfo xc_physinfo_t;
-typedef struct xen_sysctl_cputopo xc_cputopo_t;
-typedef struct xen_sysctl_numainfo xc_numainfo_t;
-typedef struct xen_sysctl_meminfo xc_meminfo_t;
-typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
-
-typedef uint32_t xc_cpu_to_node_t;
-typedef uint32_t xc_cpu_to_socket_t;
-typedef uint32_t xc_cpu_to_core_t;
-typedef uint64_t xc_node_to_memsize_t;
-typedef uint64_t xc_node_to_memfree_t;
-typedef uint32_t xc_node_to_node_dist_t;
-
-int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
-int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
-                   xc_cputopo_t *cputopo);
-int xc_microcode_update(xc_interface *xch, const void *buf, size_t len);
-int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
-                xc_meminfo_t *meminfo, uint32_t *distance);
-int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
-                   physdev_pci_device_t *devs, uint32_t *nodes);
-
-int xc_sched_id(xc_interface *xch,
-                int *sched_id);
-
-int xc_machphys_mfn_list(xc_interface *xch,
-                         unsigned long max_extents,
-                         xen_pfn_t *extent_start);
-
-typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
-int xc_getcpuinfo(xc_interface *xch, int max_cpus,
-                  xc_cpuinfo_t *info, int *nr_cpus); 
-
-int xc_domain_setmaxmem(xc_interface *xch,
-                        uint32_t domid,
-                        uint64_t max_memkb);
-
-int xc_domain_set_memmap_limit(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long map_limitkb);
-
-int xc_domain_setvnuma(xc_interface *xch,
-                        uint32_t domid,
-                        uint32_t nr_vnodes,
-                        uint32_t nr_regions,
-                        uint32_t nr_vcpus,
-                        xen_vmemrange_t *vmemrange,
-                        unsigned int *vdistance,
-                        unsigned int *vcpu_to_vnode,
-                        unsigned int *vnode_to_pnode);
-/*
- * Retrieve vnuma configuration
- * domid: IN, target domid
- * nr_vnodes: IN/OUT, number of vnodes, not NULL
- * nr_vmemranges: IN/OUT, number of vmemranges, not NULL
- * nr_vcpus: IN/OUT, number of vcpus, not NULL
- * vmemranges: OUT, an array which has length of nr_vmemranges
- * vdistance: OUT, an array which has length of nr_vnodes * nr_vnodes
- * vcpu_to_vnode: OUT, an array which has length of nr_vcpus
- */
-int xc_domain_getvnuma(xc_interface *xch,
-                       uint32_t domid,
-                       uint32_t *nr_vnodes,
-                       uint32_t *nr_vmemranges,
-                       uint32_t *nr_vcpus,
-                       xen_vmemrange_t *vmemrange,
-                       unsigned int *vdistance,
-                       unsigned int *vcpu_to_vnode);
-
-int xc_domain_soft_reset(xc_interface *xch,
-                         uint32_t domid);
-
-#if defined(__i386__) || defined(__x86_64__)
-/*
- * PC BIOS standard E820 types and structure.
- */
-#define E820_RAM          1
-#define E820_RESERVED     2
-#define E820_ACPI         3
-#define E820_NVS          4
-#define E820_UNUSABLE     5
-
-#define E820MAX           (128)
-
-struct e820entry {
-    uint64_t addr;
-    uint64_t size;
-    uint32_t type;
-} __attribute__((packed));
-int xc_domain_set_memory_map(xc_interface *xch,
-                               uint32_t domid,
-                               struct e820entry entries[],
-                               uint32_t nr_entries);
-
-int xc_get_machine_memory_map(xc_interface *xch,
-                              struct e820entry entries[],
-                              uint32_t max_entries);
-#endif
-
-int xc_reserved_device_memory_map(xc_interface *xch,
-                                  uint32_t flags,
-                                  uint16_t seg,
-                                  uint8_t bus,
-                                  uint8_t devfn,
-                                  struct xen_reserved_device_memory entries[],
-                                  uint32_t *max_entries);
-int xc_domain_set_time_offset(xc_interface *xch,
-                              uint32_t domid,
-                              int32_t time_offset_seconds);
-
-int xc_domain_set_tsc_info(xc_interface *xch,
-                           uint32_t domid,
-                           uint32_t tsc_mode,
-                           uint64_t elapsed_nsec,
-                           uint32_t gtsc_khz,
-                           uint32_t incarnation);
-
-int xc_domain_get_tsc_info(xc_interface *xch,
-                           uint32_t domid,
-                           uint32_t *tsc_mode,
-                           uint64_t *elapsed_nsec,
-                           uint32_t *gtsc_khz,
-                           uint32_t *incarnation);
-
-int xc_domain_disable_migrate(xc_interface *xch, uint32_t domid);
-
-int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
-
-int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
-
-int xc_domain_increase_reservation(xc_interface *xch,
-                                   uint32_t domid,
-                                   unsigned long nr_extents,
-                                   unsigned int extent_order,
-                                   unsigned int mem_flags,
-                                   xen_pfn_t *extent_start);
-
-int xc_domain_increase_reservation_exact(xc_interface *xch,
-                                         uint32_t domid,
-                                         unsigned long nr_extents,
-                                         unsigned int extent_order,
-                                         unsigned int mem_flags,
-                                         xen_pfn_t *extent_start);
-
-int xc_domain_decrease_reservation(xc_interface *xch,
-                                   uint32_t domid,
-                                   unsigned long nr_extents,
-                                   unsigned int extent_order,
-                                   xen_pfn_t *extent_start);
-
-int xc_domain_decrease_reservation_exact(xc_interface *xch,
-                                         uint32_t domid,
-                                         unsigned long nr_extents,
-                                         unsigned int extent_order,
-                                         xen_pfn_t *extent_start);
-
-int xc_domain_add_to_physmap(xc_interface *xch,
-                             uint32_t domid,
-                             unsigned int space,
-                             unsigned long idx,
-                             xen_pfn_t gpfn);
-
-int xc_domain_add_to_physmap_batch(xc_interface *xch,
-                                   uint32_t domid,
-                                   uint32_t foreign_domid,
-                                   unsigned int space,
-                                   unsigned int size,
-                                   xen_ulong_t *idxs,
-                                   xen_pfn_t *gfpns,
-                                   int *errs);
-
-int xc_domain_remove_from_physmap(xc_interface *xch,
-                                  uint32_t domid,
-                                  xen_pfn_t gpfn);
-
-int xc_domain_populate_physmap(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long nr_extents,
-                               unsigned int extent_order,
-                               unsigned int mem_flags,
-                               xen_pfn_t *extent_start);
-
-int xc_domain_populate_physmap_exact(xc_interface *xch,
-                                     uint32_t domid,
-                                     unsigned long nr_extents,
-                                     unsigned int extent_order,
-                                     unsigned int mem_flags,
-                                     xen_pfn_t *extent_start);
-
-int xc_domain_claim_pages(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long nr_pages);
-
-int xc_domain_memory_exchange_pages(xc_interface *xch,
-                                    uint32_t domid,
-                                    unsigned long nr_in_extents,
-                                    unsigned int in_order,
-                                    xen_pfn_t *in_extents,
-                                    unsigned long nr_out_extents,
-                                    unsigned int out_order,
-                                    xen_pfn_t *out_extents);
-
-int xc_domain_set_pod_target(xc_interface *xch,
-                             uint32_t domid,
-                             uint64_t target_pages,
-                             uint64_t *tot_pages,
-                             uint64_t *pod_cache_pages,
-                             uint64_t *pod_entries);
-
-int xc_domain_get_pod_target(xc_interface *xch,
-                             uint32_t domid,
-                             uint64_t *tot_pages,
-                             uint64_t *pod_cache_pages,
-                             uint64_t *pod_entries);
-
-int xc_domain_ioport_permission(xc_interface *xch,
-                                uint32_t domid,
-                                uint32_t first_port,
-                                uint32_t nr_ports,
-                                uint32_t allow_access);
-
-int xc_domain_irq_permission(xc_interface *xch,
-                             uint32_t domid,
-                             uint8_t pirq,
-                             uint8_t allow_access);
-
-int xc_domain_iomem_permission(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long first_mfn,
-                               unsigned long nr_mfns,
-                               uint8_t allow_access);
-
-unsigned long xc_make_page_below_4G(xc_interface *xch, uint32_t domid,
-                                    unsigned long mfn);
-
-typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
-typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
-int xc_perfc_reset(xc_interface *xch);
-int xc_perfc_query_number(xc_interface *xch,
-                          int *nbr_desc,
-                          int *nbr_val);
-int xc_perfc_query(xc_interface *xch,
-                   xc_hypercall_buffer_t *desc,
-                   xc_hypercall_buffer_t *val);
-
-typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
-int xc_lockprof_reset(xc_interface *xch);
-int xc_lockprof_query_number(xc_interface *xch,
-                             uint32_t *n_elems);
-int xc_lockprof_query(xc_interface *xch,
-                      uint32_t *n_elems,
-                      uint64_t *time,
-                      xc_hypercall_buffer_t *data);
-
-void *xc_memalign(xc_interface *xch, size_t alignment, size_t size);
-
-/**
- * Avoid using this function, as it does not work for all cases (such
- * as 4M superpages, or guests using PSE36). Only used for debugging.
- *
- * Translates a virtual address in the context of a given domain and
- * vcpu, returning the GFN containing the address (that is, an MFN for
- * PV guests, a PFN for HVM guests).  Returns 0 for failure.
- *
- * @parm xch a handle on an open hypervisor interface
- * @parm dom the domain to perform the translation in
- * @parm vcpu the vcpu to perform the translation on
- * @parm virt the virtual address to translate
- */
-unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
-                                           int vcpu, unsigned long long virt);
-
-
-int xc_copy_to_domain_page(xc_interface *xch, uint32_t domid,
-                           unsigned long dst_pfn, const char *src_page);
-
-int xc_clear_domain_pages(xc_interface *xch, uint32_t domid,
-                          unsigned long dst_pfn, int num);
-
-static inline int xc_clear_domain_page(xc_interface *xch, uint32_t domid,
-                                       unsigned long dst_pfn)
-{
-    return xc_clear_domain_pages(xch, domid, dst_pfn, 1);
-}
-
-int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
-                 uint32_t dom);
-
-/* System wide memory properties */
-int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn);
-
-/* Get current total pages allocated to a domain. */
-long xc_get_tot_pages(xc_interface *xch, uint32_t domid);
-
-/**
- * This function retrieves the number of bytes available
- * in the heap in a specific range of address-widths and nodes.
- * 
- * @parm xch a handle to an open hypervisor interface
- * @parm domid the domain to query
- * @parm min_width the smallest address width to query (0 if don't care)
- * @parm max_width the largest address width to query (0 if don't care)
- * @parm node the node to query (-1 for all)
- * @parm *bytes caller variable to put total bytes counted
- * @return 0 on success, <0 on failure.
- */
-int xc_availheap(xc_interface *xch, int min_width, int max_width, int node,
-                 uint64_t *bytes);
-
-/*
- * Trace Buffer Operations
- */
-
-/**
- * xc_tbuf_enable - enable tracing buffers
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm pages size of tracing buffers to create (in pages)
- * @parm mfn location to store mfn of the trace buffers to
- * @parm size location to store the size (in bytes) of a trace buffer to
- *
- * Gets the machine address of the trace pointer area and the size of the
- * per CPU buffers.
- */
-int xc_tbuf_enable(xc_interface *xch, unsigned long pages,
-                   unsigned long *mfn, unsigned long *size);
-
-/*
- * Disable tracing buffers.
- */
-int xc_tbuf_disable(xc_interface *xch);
-
-/**
- * This function sets the size of the trace buffers. Setting the size
- * is currently a one-shot operation that may be performed either at boot
- * time or via this interface, not both. The buffer size must be set before
- * enabling tracing.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm size the size in pages per cpu for the trace buffers
- * @return 0 on success, -1 on failure.
- */
-int xc_tbuf_set_size(xc_interface *xch, unsigned long size);
-
-/**
- * This function retrieves the current size of the trace buffers.
- * Note that the size returned is in terms of bytes, not pages.
- *
- * @parm xch a handle to an open hypervisor interface
- * @parm size will contain the size in bytes for the trace buffers
- * @return 0 on success, -1 on failure.
- */
-int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);
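Putting the comments above together, a plausible bring-up sequence is to set the per-CPU size first and only then enable tracing; the 8-page size below is an arbitrary assumption:

#include <stdio.h>
#include <xenctrl.h>

static int tbuf_bring_up(xc_interface *xch)
{
    unsigned long mfn = 0, size = 0;

    if ( xc_tbuf_set_size(xch, 8) )          /* 8 pages per CPU */
        return -1;
    if ( xc_tbuf_enable(xch, 8, &mfn, &size) )
        return -1;

    printf("trace buffers at mfn %#lx, %lu bytes per CPU\n", mfn, size);
    return 0;
}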
-
-int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask);
-
-int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);
-
-int xc_domctl(xc_interface *xch, struct xen_domctl *domctl);
-int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl);
-
-int xc_version(xc_interface *xch, int cmd, void *arg);
-
-int xc_flask_op(xc_interface *xch, xen_flask_op_t *op);
-
-/*
- * Subscribe to domain suspend via evtchn.
- * Returns -1 on failure, in which case errno will be set appropriately.
- * Just calls XEN_DOMCTL_subscribe - see the caveats for that domctl
- * (in its doc comment in domctl.h).
- */
-int xc_domain_subscribe_for_suspend(
-    xc_interface *xch, uint32_t domid, evtchn_port_t port);
-
-/**************************
- * GRANT TABLE OPERATIONS *
- **************************/
-
-/*
- * These functions sometimes log messages as above, but not always.
- */
-
-
-int xc_gnttab_op(xc_interface *xch, int cmd,
-                 void * op, int op_size, int count);
-/* Logs iff hypercall bounce fails, otherwise doesn't. */
-
-int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query);
-int xc_gnttab_get_version(xc_interface *xch, uint32_t domid); /* Never logs */
-grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid, int *gnt_num);
-grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid, int *gnt_num);
-/* Sometimes these don't set errno [fixme], and sometimes they don't log. */
-
-int xc_physdev_map_pirq(xc_interface *xch,
-                        uint32_t domid,
-                        int index,
-                        int *pirq);
-
-int xc_physdev_map_pirq_msi(xc_interface *xch,
-                            uint32_t domid,
-                            int index,
-                            int *pirq,
-                            int devfn,
-                            int bus,
-                            int entry_nr,
-                            uint64_t table_base);
-
-int xc_physdev_unmap_pirq(xc_interface *xch,
-                          uint32_t domid,
-                          int pirq);
-
-/*
- *  LOGGING AND ERROR REPORTING
- */
-
-
-#define XC_MAX_ERROR_MSG_LEN 1024
-typedef struct xc_error {
-  enum xc_error_code code;
-  char message[XC_MAX_ERROR_MSG_LEN];
-} xc_error;
-
-
-/*
- * Convert an error code or level into a text description.  Return values
- * are pointers to fixed strings and do not need to be freed.
- * These functions do not fail, but return pointers to generic strings if
- * fed bogus input.
- */
-const char *xc_error_code_to_desc(int code);
-
-/*
- * Convert an errno value to a text description.
- */
-const char *xc_strerror(xc_interface *xch, int errcode);
-
-
-/*
- * Return a pointer to the last error with level XC_REPORT_ERROR. This
- * pointer and the data pointed to are only valid until the next call
- * to libxc in the same thread.
- */
-const xc_error *xc_get_last_error(xc_interface *handle);
-
-/*
- * Clear the last error
- */
-void xc_clear_last_error(xc_interface *xch);
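A sketch of the intended error-reporting pattern; xc_domain_pause() stands in for any failing libxc call, and the returned pointer is only valid until the next libxc call in the same thread:

#include <stdio.h>
#include <xenctrl.h>

static void report_failure(xc_interface *xch, uint32_t domid)
{
    if ( xc_domain_pause(xch, domid) < 0 )
    {
        const xc_error *err = xc_get_last_error(xch);

        fprintf(stderr, "pause of dom%u failed: %s (%s)\n", domid,
                err->message, xc_error_code_to_desc(err->code));
        xc_clear_last_error(xch);
    }
}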
-
-int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value);
-int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value);
-
-/* Deprecated: use xc_hvm_param_set/get() instead. */
-int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value);
-int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value);
-
-/* HVM guest pass-through */
-int xc_assign_device(xc_interface *xch,
-                     uint32_t domid,
-                     uint32_t machine_sbdf,
-                     uint32_t flag);
-
-int xc_get_device_group(xc_interface *xch,
-                     uint32_t domid,
-                     uint32_t machine_sbdf,
-                     uint32_t max_sdevs,
-                     uint32_t *num_sdevs,
-                     uint32_t *sdev_array);
-
-int xc_test_assign_device(xc_interface *xch,
-                          uint32_t domid,
-                          uint32_t machine_sbdf);
-
-int xc_deassign_device(xc_interface *xch,
-                     uint32_t domid,
-                     uint32_t machine_sbdf);
-
-int xc_assign_dt_device(xc_interface *xch,
-                        uint32_t domid,
-                        char *path);
-int xc_test_assign_dt_device(xc_interface *xch,
-                             uint32_t domid,
-                             char *path);
-int xc_deassign_dt_device(xc_interface *xch,
-                          uint32_t domid,
-                          char *path);
-
-int xc_domain_memory_mapping(xc_interface *xch,
-                             uint32_t domid,
-                             unsigned long first_gfn,
-                             unsigned long first_mfn,
-                             unsigned long nr_mfns,
-                             uint32_t add_mapping);
-
-int xc_domain_ioport_mapping(xc_interface *xch,
-                             uint32_t domid,
-                             uint32_t first_gport,
-                             uint32_t first_mport,
-                             uint32_t nr_ports,
-                             uint32_t add_mapping);
-
-int xc_domain_update_msi_irq(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t gvec,
-    uint32_t pirq,
-    uint32_t gflags,
-    uint64_t gtable);
-
-int xc_domain_unbind_msi_irq(xc_interface *xch,
-                             uint32_t domid,
-                             uint32_t gvec,
-                             uint32_t pirq,
-                             uint32_t gflags);
-
-int xc_domain_bind_pt_irq(xc_interface *xch,
-                          uint32_t domid,
-                          uint8_t machine_irq,
-                          uint8_t irq_type,
-                          uint8_t bus,
-                          uint8_t device,
-                          uint8_t intx,
-                          uint8_t isa_irq);
-
-int xc_domain_unbind_pt_irq(xc_interface *xch,
-                          uint32_t domid,
-                          uint8_t machine_irq,
-                          uint8_t irq_type,
-                          uint8_t bus,
-                          uint8_t device,
-                          uint8_t intx,
-                          uint8_t isa_irq);
-
-int xc_domain_bind_pt_pci_irq(xc_interface *xch,
-                              uint32_t domid,
-                              uint8_t machine_irq,
-                              uint8_t bus,
-                              uint8_t device,
-                              uint8_t intx);
-
-int xc_domain_bind_pt_isa_irq(xc_interface *xch,
-                              uint32_t domid,
-                              uint8_t machine_irq);
-
-int xc_domain_bind_pt_spi_irq(xc_interface *xch,
-                              uint32_t domid,
-                              uint16_t vspi,
-                              uint16_t spi);
-
-int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
-                                uint32_t domid,
-                                uint16_t vspi,
-                                uint16_t spi);
-
-/* Set the target domain */
-int xc_domain_set_target(xc_interface *xch,
-                         uint32_t domid,
-                         uint32_t target);
-
-/* Control the domain for debug */
-int xc_domain_debug_control(xc_interface *xch,
-                            uint32_t domid,
-                            uint32_t sop,
-                            uint32_t vcpu);
-
-#if defined(__i386__) || defined(__x86_64__)
-
-/*
- * CPUID policy data, expressed in the legacy XEND format.
- *
- * Policy is an array of strings, 32 chars long:
- *   policy[0] = eax
- *   policy[1] = ebx
- *   policy[2] = ecx
- *   policy[3] = edx
- *
- * The format of the string is the following:
- *   '1' -> force to 1
- *   '0' -> force to 0
- *   'x' -> we don't care (use default)
- *   'k' -> pass through host value
- *   's' -> legacy alias for 'k'
- */
-struct xc_xend_cpuid {
-    union {
-        struct {
-            uint32_t leaf, subleaf;
-        };
-        uint32_t input[2];
-    };
-    char *policy[4];
-};
-
-/*
- * Make adjustments to the CPUID settings for a domain.
- *
- * This path is used in two cases.  First, for fresh boots of the domain, and
- * secondly for migrate-in/restore of pre-4.14 guests (where CPUID data was
- * missing from the stream).  The @restore parameter distinguishes these
- * cases, and the generated policy must be compatible with Xen 4.13.
- *
- * Either pass a full new @featureset (and @nr_features), or adjust individual
- * features (@pae).
- *
- * Then (optionally) apply legacy XEND overrides (@xend) to the result.
- */
-int xc_cpuid_apply_policy(xc_interface *xch,
-                          uint32_t domid, bool restore,
-                          const uint32_t *featureset,
-                          unsigned int nr_features, bool pae,
-                          const struct xc_xend_cpuid *xend);
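As a hedged illustration of the XEND string format, the sketch below forces one feature bit to 0 and leaves everything else at the default. The choice of leaf, the leftmost-character-is-bit-31 ordering, and the all-ones terminator entry follow libxl's usage of this interface, but should be treated as assumptions here:

static int hide_one_feature(xc_interface *xch, uint32_t domid)
{
    /* Hypothetical override: force CPUID leaf 1, ecx bit 21 to 0 and leave
     * everything else at the default.  Strings are 32 chars each, assumed
     * to be ordered bit 31 first. */
    static struct xc_xend_cpuid override[] = {
        {
            .leaf = 1, .subleaf = ~0u,
            .policy = {
                "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",  /* eax */
                "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",  /* ebx */
                "xxxxxxxxxx0xxxxxxxxxxxxxxxxxxxxx",  /* ecx */
                "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",  /* edx */
            },
        },
        { .leaf = ~0u, .subleaf = ~0u },             /* assumed terminator */
    };

    /* Fresh boot, no explicit featureset, PAE on, then apply the override. */
    return xc_cpuid_apply_policy(xch, domid, false, NULL, 0, true, override);
}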
-int xc_mca_op(xc_interface *xch, struct xen_mc *mc);
-int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
-                        xc_cpumap_t cpumap, unsigned int nr_cpus);
-#endif
-
-struct xc_px_val {
-    uint64_t freq;        /* Px core frequency */
-    uint64_t residency;   /* Px residency time */
-    uint64_t count;       /* Px transition count */
-};
-
-struct xc_px_stat {
-    uint8_t total;        /* total Px states */
-    uint8_t usable;       /* usable Px states */
-    uint8_t last;         /* last Px state */
-    uint8_t cur;          /* current Px state */
-    uint64_t *trans_pt;   /* Px transition table */
-    struct xc_px_val *pt;
-};
-
-int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px);
-int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt);
-int xc_pm_reset_pxstat(xc_interface *xch, int cpuid);
-
-struct xc_cx_stat {
-    uint32_t nr;           /* entry nr in triggers[]/residencies[], incl C0 */
-    uint32_t last;         /* last Cx state */
-    uint64_t idle_time;    /* idle time from boot */
-    uint64_t *triggers;    /* Cx trigger counts */
-    uint64_t *residencies; /* Cx residencies */
-    uint32_t nr_pc;        /* entry nr in pc[] */
-    uint32_t nr_cc;        /* entry nr in cc[] */
-    uint64_t *pc;          /* 1-biased indexing (i.e. excl C0) */
-    uint64_t *cc;          /* 1-biased indexing (i.e. excl C0) */
-};
-typedef struct xc_cx_stat xc_cx_stat_t;
-
-int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx);
-int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt);
-int xc_pm_reset_cxstat(xc_interface *xch, int cpuid);
-
-int xc_cpu_online(xc_interface *xch, int cpu);
-int xc_cpu_offline(xc_interface *xch, int cpu);
-int xc_smt_enable(xc_interface *xch);
-int xc_smt_disable(xc_interface *xch);
-
-/*
- * The cpufreq parameter names in this structure match the sysfs file
- * names used by native Linux.
- */
-typedef struct xen_userspace xc_userspace_t;
-typedef struct xen_ondemand xc_ondemand_t;
-
-struct xc_get_cpufreq_para {
-    /* IN/OUT variable */
-    uint32_t cpu_num;
-    uint32_t freq_num;
-    uint32_t gov_num;
-
-    /* for all governors */
-    /* OUT variable */
-    uint32_t *affected_cpus;
-    uint32_t *scaling_available_frequencies;
-    char     *scaling_available_governors;
-    char scaling_driver[CPUFREQ_NAME_LEN];
-
-    uint32_t cpuinfo_cur_freq;
-    uint32_t cpuinfo_max_freq;
-    uint32_t cpuinfo_min_freq;
-    uint32_t scaling_cur_freq;
-
-    char scaling_governor[CPUFREQ_NAME_LEN];
-    uint32_t scaling_max_freq;
-    uint32_t scaling_min_freq;
-
-    /* for specific governor */
-    union {
-        xc_userspace_t userspace;
-        xc_ondemand_t ondemand;
-    } u;
-
-    int32_t turbo_enabled;
-};
-
-int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
-                        struct xc_get_cpufreq_para *user_para);
-int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname);
-int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
-                        int ctrl_type, int ctrl_value);
-int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq);
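Because the counts in the structure above are IN/OUT, callers typically invoke xc_get_cpufreq_para() twice: once with empty arrays to learn the sizes, then again after allocating them. The retry-on-EAGAIN convention below mirrors what the xenpm tool does, but treat it as an assumption rather than a documented contract:

#include <errno.h>
#include <stdlib.h>
#include <xenctrl.h>

/* 'para' must be zero-initialised by the caller. */
static int fetch_cpufreq_para(xc_interface *xch, int cpu,
                              struct xc_get_cpufreq_para *para)
{
    int ret = xc_get_cpufreq_para(xch, cpu, para);

    if ( ret && errno == EAGAIN )
    {
        /* First pass filled in cpu_num/freq_num/gov_num; allocate and retry. */
        para->affected_cpus = calloc(para->cpu_num, sizeof(uint32_t));
        para->scaling_available_frequencies = calloc(para->freq_num, sizeof(uint32_t));
        para->scaling_available_governors = calloc(para->gov_num, CPUFREQ_NAME_LEN);
        ret = xc_get_cpufreq_para(xch, cpu, para);
    }

    return ret;
}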
-
-int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value);
-
-int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value);
-int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value);
-
-int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value);
-int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value);
-
-int xc_enable_turbo(xc_interface *xch, int cpuid);
-int xc_disable_turbo(xc_interface *xch, int cpuid);
-
-/**
- * altp2m operations
- */
-
-int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state);
-int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state);
-int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
-                                     uint32_t vcpuid, xen_pfn_t gfn);
-int xc_altp2m_set_vcpu_disable_notify(xc_interface *handle, uint32_t domid,
-                                      uint32_t vcpuid);
-int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
-                          xenmem_access_t default_access, uint16_t *view_id);
-int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
-                           uint16_t view_id);
-/* Switch all vCPUs of the domain to the specified altp2m view */
-int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
-                             uint16_t view_id);
-int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
-                              uint16_t view_id, xen_pfn_t gfn, bool sve);
-int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
-                                   uint16_t view_id, xen_pfn_t first_gfn,
-                                   xen_pfn_t last_gfn, bool sve,
-                                   xen_pfn_t *error_gfn, int32_t *error_code);
-int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
-                              uint16_t view_id, xen_pfn_t gfn, bool *sve);
-int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
-                             uint16_t view_id, xen_pfn_t gfn,
-                             xenmem_access_t access);
-int xc_altp2m_set_mem_access_multi(xc_interface *handle, uint32_t domid,
-                                   uint16_t view_id, uint8_t *access,
-                                   uint64_t *gfns, uint32_t nr);
-int xc_altp2m_get_mem_access(xc_interface *handle, uint32_t domid,
-                             uint16_t view_id, xen_pfn_t gfn,
-                             xenmem_access_t *access);
-int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
-                         uint16_t view_id, xen_pfn_t old_gfn,
-                         xen_pfn_t new_gfn);
-int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
-                               uint32_t vcpuid, uint16_t *p2midx);
-/*
- * Set view visibility for xc_altp2m_switch_to_view and vmfunc.
- * Note: If altp2m mode is set to mixed, the guest is able to change the view
- * visibility and then call vmfunc.
- */
-int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
-                             uint16_t view_id, bool visible);
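A hedged outline of a typical altp2m session built from the calls above, with error handling trimmed; the choice of gfn and access type, and the assumption that view 0 is the default host p2m, are illustrative only:

static void altp2m_session(xc_interface *xch, uint32_t domid, xen_pfn_t gfn)
{
    uint16_t view = 0;

    xc_altp2m_set_domain_state(xch, domid, true);
    xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view);
    xc_altp2m_set_mem_access(xch, domid, view, gfn, XENMEM_access_r);
    xc_altp2m_switch_to_view(xch, domid, view);

    /* ... monitor / introspect the domain here ... */

    xc_altp2m_switch_to_view(xch, domid, 0);   /* back to the assumed default view */
    xc_altp2m_destroy_view(xch, domid, view);
    xc_altp2m_set_domain_state(xch, domid, false);
}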
-
-/** 
- * Mem paging operations.
- * Paging is supported only on the x86 architecture in 64 bit mode, with
- * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
- * support is considered experimental.
- */
-int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
-int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
-int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
-int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
-                           uint64_t gfn);
-int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
-int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
-int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
-                       uint64_t gfn, void *buffer);
-
-/** 
- * Access tracking operations.
- * Supported only on Intel EPT 64 bit processors.
- */
-
-/*
- * Set a range of memory to a specific access.
- * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
- * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
- */
-int xc_set_mem_access(xc_interface *xch, uint32_t domain_id,
-                      xenmem_access_t access, uint64_t first_pfn,
-                      uint32_t nr);
-
-/*
- * Set an array of pages to their respective access in the access array.
- * The nr parameter specifies the size of the pages and access arrays.
- * The same allowed access types as for xc_set_mem_access() apply.
- */
-int xc_set_mem_access_multi(xc_interface *xch, uint32_t domain_id,
-                            uint8_t *access, uint64_t *pages,
-                            uint32_t nr);
-
-/*
- * Gets the mem access for the given page (returned in access on success)
- */
-int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
-                      uint64_t pfn, xenmem_access_t *access);
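A short sketch of restricting a single page and reading the setting back; the gfn value and the chosen access type are assumptions:

#include <inttypes.h>
#include <stdio.h>
#include <xenctrl.h>

static void restrict_page(xc_interface *xch, uint32_t domid, uint64_t gfn)
{
    xenmem_access_t access;

    if ( xc_set_mem_access(xch, domid, XENMEM_access_r, gfn, 1) )
        return;

    if ( xc_get_mem_access(xch, domid, gfn, &access) == 0 )
        printf("gfn %#" PRIx64 " now has access type %d\n", gfn, (int)access);
}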
-
-/*
- * Returns the VM_EVENT_INTERFACE version.
- */
-int xc_vm_event_get_version(xc_interface *xch);
-
-/***
- * Monitor control operations.
- *
- * Enables the VM event monitor ring and returns the mapped ring page.
- * This ring is used to deliver mem_access events, as well as a set of additional
- * events that can be enabled with the xc_monitor_* functions.
- *
- * Will return NULL on error.
- * Caller has to unmap this page when done.
- */
-void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
-int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
-int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
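A hedged sketch of bringing the monitor ring up and tearing it down again; the vm_event ring plumbing in between is omitted, and using XC_PAGE_SIZE as the mapping size is an assumption:

#include <sys/mman.h>
#include <xenctrl.h>

static int monitor_roundtrip(xc_interface *xch, uint32_t domid)
{
    uint32_t port = 0;
    void *ring = xc_monitor_enable(xch, domid, &port);

    if ( !ring )
        return -1;

    /* ... set up the vm_event ring on 'ring', bind 'port', consume events ... */

    munmap(ring, XC_PAGE_SIZE);      /* caller must unmap the ring page */
    return xc_monitor_disable(xch, domid);
}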
-/*
- * Get a bitmap of supported monitor events in the form
- * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
- */
-int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
-                                uint32_t *capabilities);
-int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
-                             uint16_t index, bool enable, bool sync,
-                             uint64_t bitmask, bool onchangeonly);
-/*
- * A list of MSR indices can usually be found in /usr/include/asm/msr-index.h.
- * Please consult the Intel/AMD manuals for more information on
- * non-architectural indices.
- */
-int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
-                          bool enable, bool onchangeonly);
-int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id, bool enable);
-int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
-                                   bool enable);
-int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
-                                 bool enable);
-int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id,
-                             bool enable, bool sync, bool allow_userspace);
-/*
- * Disables page-walk mem_access events by emulating. If the
- * emulation cannot be performed, a VM_EVENT_REASON_EMUL_UNIMPLEMENTED
- * event will be issued.
- */
-int xc_monitor_inguest_pagefault(xc_interface *xch, uint32_t domain_id,
-                                 bool disable);
-int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
-                                bool enable, bool sync);
-int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable);
-int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
-                               bool enable);
-int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
-                                  bool enable);
-/**
- * This function enables / disables emulation for each REP for a
- * REP-compatible instruction.
- *
- * @parm xch a handle to an open hypervisor interface.
- * @parm domain_id the domain id to operate on.
- * @parm enable if 0 optimize when possible, else emulate each REP.
- * @return 0 on success, -1 on failure.
- */
-int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
-                                bool enable);
-
-/***
- * Memory sharing operations.
- *
- * Unless otherwise noted, these calls return 0 on success, -1 and errno on
- * failure.
- *
- * Sharing is supported only on the x86 architecture in 64 bit mode, with
- * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
- * support is considered experimental.
- *
- * Calls below return ENOSYS if the host architecture is not x86_64.
- * Calls below return ENODEV if the domain does not support HAP.
- * Calls below return ESRCH if the specified domain does not exist.
- * Calls below return EPERM if the caller is unprivileged for this domain.
- */
-
-/* Turn on/off sharing for the domid, depending on the enable flag.
- *
- * Returns EXDEV if trying to enable and the domain has had a PCI device
- * assigned for passthrough (these two features are mutually exclusive).
- *
- * When sharing for a domain is turned off, the domain may still reference
- * shared pages. Unsharing happens lazily. */
-int xc_memshr_control(xc_interface *xch,
-                      uint32_t domid,
-                      int enable);
-
-/* Create a communication ring in which the hypervisor will place ENOMEM
- * notifications.
- *
- * ENOMEM happens when unsharing pages: a Copy-on-Write duplicate needs to be
- * allocated, and thus an out-of-memory error can occur.
- *
- * For complete examples on how to plumb a notification ring, look into
- * xenpaging or xen-access.
- *
- * On receipt of a notification, the helper should ensure there is memory
- * available to the domain before retrying.
- *
- * If a domain encounters an ENOMEM condition when sharing and this ring
- * has not been set up, the hypervisor will crash the domain.
- *
- * Fails with:
- *  EINVAL if port is NULL
- *  EINVAL if the sharing ring has already been enabled
- *  ENOSYS if no guest gfn has been specified to host the ring via an hvm param
- *  EINVAL if the gfn for the ring has not been populated
- *  ENOENT if the gfn for the ring is paged out, or cannot be unshared
- *  EINVAL if the gfn for the ring cannot be written to
- *  EINVAL if the domain is dying
- *  ENOSPC if an event channel cannot be allocated for the ring
- *  ENOMEM if memory cannot be allocated for internal data structures
- *  EINVAL or EACCESS if the request is denied by the security policy
- */
-
-int xc_memshr_ring_enable(xc_interface *xch,
-                          uint32_t domid,
-                          uint32_t *port);
-/* Disable the ring for ENOMEM communication.
- * May fail with EINVAL if the ring was not enabled in the first place.
- */
-int xc_memshr_ring_disable(xc_interface *xch,
-                           uint32_t domid);
-
-/*
- * Calls below return EINVAL if sharing has not been enabled for the domain
- * Calls below return EINVAL if the domain is dying
- */
-/* Once a response to an ENOMEM notification is prepared, the tool can
- * notify the hypervisor to re-schedule the faulting vcpu of the domain with an
- * event channel kick and/or this call. */
-int xc_memshr_domain_resume(xc_interface *xch,
-                            uint32_t domid);
-
-/* Select a page for sharing.
- *
- * A 64 bit opaque handle will be stored in handle.  The hypervisor ensures
- * that if the page is modified, the handle will be invalidated, and future
- * users of it will fail. If the page has already been selected and is still
- * associated to a valid handle, the existing handle will be returned.
- *
- * May fail with:
- *  EINVAL if the gfn is not populated or not sharable (mmio, etc)
- *  ENOMEM if internal data structures cannot be allocated
- *  E2BIG if the page is being referenced by other subsystems (e.g. qemu)
- *  ENOENT or EEXIST if there are internal hypervisor errors.
- */
-int xc_memshr_nominate_gfn(xc_interface *xch,
-                           uint32_t domid,
-                           unsigned long gfn,
-                           uint64_t *handle);
-/* Same as above, but instead of a guest frame number, the input is a grant
- * reference provided by the guest.
- *
- * May fail with EINVAL if the grant reference is invalid.
- */
-int xc_memshr_nominate_gref(xc_interface *xch,
-                            uint32_t domid,
-                            grant_ref_t gref,
-                            uint64_t *handle);
-
-/* The three calls below may fail with
- * 10 (or -XENMEM_SHARING_OP_S_HANDLE_INVALID) if the handle passed as source
- * is invalid.
- * 9 (or -XENMEM_SHARING_OP_C_HANDLE_INVALID) if the handle passed as client is
- * invalid.
- */
-/* Share two nominated guest pages.
- *
- * If the call succeeds, both pages will point to the same backing frame (or
- * mfn). The hypervisor will verify the handles are still valid, but it will
- * not perform any sanity checking on the contents of the pages (the selection
- * mechanism for sharing candidates is entirely up to the user-space tool).
- *
- * After successful sharing, the client handle becomes invalid. Both <domain,
- * gfn> tuples point to the same mfn with the same handle, the one specified as
- * source. Either 3-tuple can be specified later for further re-sharing.
- */
-int xc_memshr_share_gfns(xc_interface *xch,
-                    uint32_t source_domain,
-                    unsigned long source_gfn,
-                    uint64_t source_handle,
-                    uint32_t client_domain,
-                    unsigned long client_gfn,
-                    uint64_t client_handle);
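Combining the calls above, deduplicating one page between two domains might look like this sketch; the gfn values are arbitrary, both domains must have sharing enabled, and the error codes described in the comments apply:

static int share_one_page(xc_interface *xch,
                          uint32_t src_dom, unsigned long src_gfn,
                          uint32_t cli_dom, unsigned long cli_gfn)
{
    uint64_t src_handle, cli_handle;

    if ( xc_memshr_control(xch, src_dom, 1) ||
         xc_memshr_control(xch, cli_dom, 1) )
        return -1;

    if ( xc_memshr_nominate_gfn(xch, src_dom, src_gfn, &src_handle) ||
         xc_memshr_nominate_gfn(xch, cli_dom, cli_gfn, &cli_handle) )
        return -1;

    return xc_memshr_share_gfns(xch, src_dom, src_gfn, src_handle,
                                cli_dom, cli_gfn, cli_handle);
}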
-
-/* Same as above, but share two grant references instead.
- *
- * May fail with EINVAL if either grant reference is invalid.
- */
-int xc_memshr_share_grefs(xc_interface *xch,
-                    uint32_t source_domain,
-                    grant_ref_t source_gref,
-                    uint64_t source_handle,
-                    uint32_t client_domain,
-                    grant_ref_t client_gref,
-                    uint64_t client_handle);
-
-/* Directly adds a shared frame to the guest physmap of the client
- * domain.
- *
- * May additionally fail with
- *  9 (-XENMEM_SHARING_OP_C_HANDLE_INVALID) if the physmap entry for the gfn is
- *  not suitable.
- *  ENOMEM if internal data structures cannot be allocated.
- *  ENOENT if there is an internal hypervisor error.
- */
-int xc_memshr_add_to_physmap(xc_interface *xch,
-                    uint32_t source_domain,
-                    unsigned long source_gfn,
-                    uint64_t source_handle,
-                    uint32_t client_domain,
-                    unsigned long client_gfn);
-
-/* Deduplicates a range of memory of a client domain. Using this function
- * is equivalent to calling xc_memshr_nominate_gfn for each gfn in the two
- * domains followed by xc_memshr_share_gfns.
- *
- * May fail with -EINVAL if the source and client domain have different
- * memory size or if memory sharing is not enabled on either of the domains.
- * May also fail with -ENOMEM if there isn't enough memory available to store
- * the sharing metadata before deduplication can happen.
- */
-int xc_memshr_range_share(xc_interface *xch,
-                          uint32_t source_domain,
-                          uint32_t client_domain,
-                          uint64_t first_gfn,
-                          uint64_t last_gfn);
-
-int xc_memshr_fork(xc_interface *xch,
-                   uint32_t source_domain,
-                   uint32_t client_domain,
-                   bool allow_with_iommu,
-                   bool block_interrupts);
-
-/*
- * Note: this function is only intended to be used on short-lived forks that
- * haven't yet acquired a lot of memory. In case the fork has a lot of memory
- * it is likely more performant to create a new fork with xc_memshr_fork.
- *
- * With VMs that have a lot of memory this call may block for a long time.
- */
-int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain);
-
-/* Debug calls: return the number of pages referencing the shared frame backing
- * the input argument. Should be one or greater.
- *
- * May fail with EINVAL if there is no backing shared frame for the input
- * argument.
- */
-int xc_memshr_debug_gfn(xc_interface *xch,
-                        uint32_t domid,
-                        unsigned long gfn);
-/* May additionally fail with EINVAL if the grant reference is invalid. */
-int xc_memshr_debug_gref(xc_interface *xch,
-                         uint32_t domid,
-                         grant_ref_t gref);
-
-/* Audits the share subsystem.
- *
- * Returns ENOSYS if not supported (may not be compiled into the hypervisor).
- *
- * Returns the number of errors found during auditing otherwise. May be (should
- * be!) zero.
- *
- * If debugtrace support has been compiled into the hypervisor and is enabled,
- * verbose descriptions for the errors are available in the hypervisor console.
- */
-int xc_memshr_audit(xc_interface *xch);
-
-/* Stats reporting.
- *
- * At any point in time, the following equality should hold for a host:
- *
- *  Let dominfo(d) be the xc_dominfo_t struct filled by a call to
- *  xc_domain_getinfo(d)
- *
- *  The summation of dominfo(d)->shr_pages for all domains in the system
- *      should be equal to
- *  xc_sharing_freed_pages + xc_sharing_used_frames
- */
-/*
- * This function returns the total number of pages freed by using sharing
- * on the system.  For example, if two domains contain a single entry in
- * their p2m table that points to the same shared page (and no other pages
- * in the system are shared), then this function should return 1.
- */
-long xc_sharing_freed_pages(xc_interface *xch);
-
-/*
- * This function returns the total number of frames occupied by shared
- * pages on the system.  This is independent of the number of domains
- * pointing at these frames.  For example, in the above scenario this
- * should return 1. (And dominfo(d) for each of the two domains should return 1
- * as well).
- *
- * Note that some of these sharing_used_frames may be referenced by
- * a single domain page, and thus not realize any savings. The same
- * applies to some of the pages counted in dominfo(d)->shr_pages.
- */
-long xc_sharing_used_frames(xc_interface *xch);
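The invariant above can be spot-checked from a toolstack; summing shr_pages over all domains via xc_domain_getinfo() is left out of this sketch:

#include <stdio.h>
#include <xenctrl.h>

static void print_sharing_stats(xc_interface *xch)
{
    long freed = xc_sharing_freed_pages(xch);
    long used  = xc_sharing_used_frames(xch);

    if ( freed >= 0 && used >= 0 )
        printf("sharing: %ld pages freed, %ld frames backing shared pages\n",
               freed, used);
}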
-/*** End sharing interface ***/
-
-int xc_flask_load(xc_interface *xc_handle, char *buf, uint32_t size);
-int xc_flask_context_to_sid(xc_interface *xc_handle, char *buf, uint32_t size, uint32_t *sid);
-int xc_flask_sid_to_context(xc_interface *xc_handle, int sid, char *buf, uint32_t size);
-int xc_flask_getenforce(xc_interface *xc_handle);
-int xc_flask_setenforce(xc_interface *xc_handle, int mode);
-int xc_flask_getbool_byid(xc_interface *xc_handle, int id, char *name, uint32_t size, int *curr, int *pend);
-int xc_flask_getbool_byname(xc_interface *xc_handle, char *name, int *curr, int *pend);
-int xc_flask_setbool(xc_interface *xc_handle, char *name, int value, int commit);
-int xc_flask_add_pirq(xc_interface *xc_handle, unsigned int pirq, char *scontext);
-int xc_flask_add_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high,
-                      char *scontext);
-int xc_flask_add_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high,
-                     char *scontext);
-int xc_flask_add_device(xc_interface *xc_handle, unsigned long device, char *scontext);
-int xc_flask_del_pirq(xc_interface *xc_handle, unsigned int pirq);
-int xc_flask_del_ioport(xc_interface *xc_handle, unsigned long low, unsigned long high);
-int xc_flask_del_iomem(xc_interface *xc_handle, unsigned long low, unsigned long high);
-int xc_flask_del_device(xc_interface *xc_handle, unsigned long device);
-int xc_flask_access(xc_interface *xc_handle, const char *scon, const char *tcon,
-                  uint16_t tclass, uint32_t req,
-                  uint32_t *allowed, uint32_t *decided,
-                  uint32_t *auditallow, uint32_t *auditdeny,
-                  uint32_t *seqno);
-int xc_flask_avc_cachestats(xc_interface *xc_handle, char *buf, int size);
-int xc_flask_policyvers(xc_interface *xc_handle);
-int xc_flask_avc_hashstats(xc_interface *xc_handle, char *buf, int size);
-int xc_flask_getavc_threshold(xc_interface *xc_handle);
-int xc_flask_setavc_threshold(xc_interface *xc_handle, int threshold);
-int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid);
-
-struct elf_binary;
-void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
-                        int verbose);
-/* Useful for callers who also use libelf. */
-
-/*
- * Execute an image previously loaded with xc_kexec_load().
- *
- * Does not return on success.
- *
- * Fails with:
- *   ENOENT if the specified image has not been loaded.
- */
-int xc_kexec_exec(xc_interface *xch, int type);
-
-/*
- * Find the machine address and size of certain memory areas.
- *
- *   KEXEC_RANGE_MA_CRASH       crash area
- *   KEXEC_RANGE_MA_XEN         Xen itself
- *   KEXEC_RANGE_MA_CPU         CPU note for CPU number 'nr'
- *   KEXEC_RANGE_MA_XENHEAP     xenheap
- *   KEXEC_RANGE_MA_EFI_MEMMAP  EFI Memory Map
- *   KEXEC_RANGE_MA_VMCOREINFO  vmcoreinfo
- *
- * Fails with:
- *   EINVAL if the range or CPU number isn't valid.
- */
-int xc_kexec_get_range(xc_interface *xch, int range,  int nr,
-                       uint64_t *size, uint64_t *start);
-
-/*
- * Load a kexec image into memory.
- *
- * The image may be of type KEXEC_TYPE_DEFAULT (executed on request)
- * or KEXEC_TYPE_CRASH (executed on a crash).
- *
- * The image architecture may be a 32-bit variant of the hypervisor
- * architecture (e.g., EM_386 on an x86-64 hypervisor).
- *
- * Fails with:
- *   ENOMEM if there is insufficient memory for the new image.
- *   EINVAL if the image does not fit into the crash area or the entry
- *          point isn't within one of the segments.
- *   EBUSY  if another image is being executed.
- */
-int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
-                  uint64_t entry_maddr,
-                  uint32_t nr_segments, xen_kexec_segment_t *segments);
-
-/*
- * Unload a kexec image.
- *
- * This prevents a KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH image from
- * being executed.  The crash images are not cleared from the crash
- * region.
- */
-int xc_kexec_unload(xc_interface *xch, int type);
-
-/*
- * Find out whether the image has been successfully loaded.
- *
- * The type can be either KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH.
- * If zero is returned, that means no image is loaded for the type.
- * If one is returned, that means an image is loaded for the type.
- * Otherwise, a negative return value indicates an error.
- */
-int xc_kexec_status(xc_interface *xch, int type);
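A sketch of checking for a loaded default image and executing it; KEXEC_TYPE_DEFAULT comes from Xen's public kexec interface, and xc_kexec_exec() does not return on success:

static void maybe_kexec(xc_interface *xch)
{
    if ( xc_kexec_status(xch, KEXEC_TYPE_DEFAULT) == 1 )
        xc_kexec_exec(xch, KEXEC_TYPE_DEFAULT);   /* only returns on failure */
}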
-
-typedef xenpf_resource_entry_t xc_resource_entry_t;
-
-/*
- * Generic resource operation which contains multiple non-preemptible
- * resource access entries that are passed to xc_resource_op().
- */
-struct xc_resource_op {
-    uint64_t result;        /* on return, check this field first */
-    uint32_t cpu;           /* which cpu to run */
-    uint32_t nr_entries;    /* number of resource entries */
-    xc_resource_entry_t *entries;
-};
-
-typedef struct xc_resource_op xc_resource_op_t;
-int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops);
-
-#if defined(__i386__) || defined(__x86_64__)
-enum xc_psr_cmt_type {
-    XC_PSR_CMT_L3_OCCUPANCY,
-    XC_PSR_CMT_TOTAL_MEM_COUNT,
-    XC_PSR_CMT_LOCAL_MEM_COUNT,
-};
-typedef enum xc_psr_cmt_type xc_psr_cmt_type;
-
-enum xc_psr_type {
-    XC_PSR_CAT_L3_CBM      = 1,
-    XC_PSR_CAT_L3_CBM_CODE = 2,
-    XC_PSR_CAT_L3_CBM_DATA = 3,
-    XC_PSR_CAT_L2_CBM      = 4,
-    XC_PSR_MBA_THRTL       = 5,
-};
-typedef enum xc_psr_type xc_psr_type;
-
-enum xc_psr_feat_type {
-    XC_PSR_CAT_L3,
-    XC_PSR_CAT_L2,
-    XC_PSR_MBA,
-};
-typedef enum xc_psr_feat_type xc_psr_feat_type;
-
-union xc_psr_hw_info {
-    struct {
-        uint32_t cos_max;
-        uint32_t cbm_len;
-        bool     cdp_enabled;
-    } cat;
-
-    struct {
-        uint32_t cos_max;
-        uint32_t thrtl_max;
-        bool     linear;
-    } mba;
-};
-typedef union xc_psr_hw_info xc_psr_hw_info;
-
-int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid);
-int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid);
-int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
-                               uint32_t *rmid);
-int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid);
-int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
-                                       uint32_t *upscaling_factor);
-int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask);
-int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
-                                 uint32_t *l3_cache_size);
-int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
-                        uint32_t psr_cmt_type, uint64_t *monitor_data,
-                        uint64_t *tsc);
-int xc_psr_cmt_enabled(xc_interface *xch);
-
-int xc_psr_set_domain_data(xc_interface *xch, uint32_t domid,
-                           xc_psr_type type, uint32_t target,
-                           uint64_t data);
-int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
-                           xc_psr_type type, uint32_t target,
-                           uint64_t *data);
-int xc_psr_get_hw_info(xc_interface *xch, uint32_t socket,
-                       xc_psr_feat_type type, xc_psr_hw_info *hw_info);
-
-int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps);
-int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
-                          uint32_t *nr_features, uint32_t *featureset);
-
-int xc_get_cpu_policy_size(xc_interface *xch, uint32_t *nr_leaves,
-                           uint32_t *nr_msrs);
-int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
-                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
-                             uint32_t *nr_msrs, xen_msr_entry_t *msrs);
-int xc_get_domain_cpu_policy(xc_interface *xch, uint32_t domid,
-                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
-                             uint32_t *nr_msrs, xen_msr_entry_t *msrs);
-int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
-                             uint32_t nr_leaves, xen_cpuid_leaf_t *leaves,
-                             uint32_t nr_msrs, xen_msr_entry_t *msrs,
-                             uint32_t *err_leaf_p, uint32_t *err_subleaf_p,
-                             uint32_t *err_msr_p);
-
-uint32_t xc_get_cpu_featureset_size(void);
-
-enum xc_static_cpu_featuremask {
-    XC_FEATUREMASK_KNOWN,
-    XC_FEATUREMASK_SPECIAL,
-    XC_FEATUREMASK_PV_MAX,
-    XC_FEATUREMASK_PV_DEF,
-    XC_FEATUREMASK_HVM_SHADOW_MAX,
-    XC_FEATUREMASK_HVM_SHADOW_DEF,
-    XC_FEATUREMASK_HVM_HAP_MAX,
-    XC_FEATUREMASK_HVM_HAP_DEF,
-};
-const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
-
-#endif
-
-int xc_livepatch_upload(xc_interface *xch,
-                        char *name, unsigned char *payload, uint32_t size);
-
-int xc_livepatch_get(xc_interface *xch,
-                     char *name,
-                     xen_livepatch_status_t *status);
-
-/*
- * Get the number of available payloads and the actual total sizes of
- * the payloads' name and metadata arrays.
- *
- * This function is typically called before xc_livepatch_list() in order
- * to obtain the sizes and correctly allocate all necessary data resources.
- *
- * The return value is zero if the hypercall completed successfully.
- *
- * If there was an error performing the sysctl operation, the return value
- * will contain the hypercall error code value.
- */
-int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
-                                uint32_t *name_total_size,
-                                uint32_t *metadata_total_size);
-
-/*
- * The heart of this function is to get an array of the following objects:
- *   - xen_livepatch_status_t: states and return codes of payloads
- *   - name: names of payloads
- *   - len: lengths of corresponding payloads' names
- *   - metadata: payloads' metadata
- *   - metadata_len: lengths of corresponding payloads' metadata
- *
- * However, it is complex because it has to deal with the hypervisor
- * returning some of the requested data or data being stale
- * (another hypercall might alter the list).
- *
- * The parameters that the function expects to contain data from
- * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
- * 'left' are also updated with the number of entries filled out
- * and respectively the number of entries left to get from hypervisor.
- *
- * It is expected that the caller of this function will first issue the
- * xc_livepatch_list_get_sizes() in order to obtain total sizes of names
- * and all metadata as well as the current number of payload entries.
- * The total sizes are required and supplied via the 'name_total_size' and
- * 'metadata_total_size' parameters.
- *
- * The 'max' is to be provided by the caller with the maximum number of
- * entries that 'info', 'name', 'len', 'metadata' and 'metadata_len' arrays
- * can be filled up with.
- *
- * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
- * structure size.
- *
- * Each entry in the 'name' array may have an arbitrary size.
- *
- * Each entry in the 'len' array is expected to be of uint32_t size.
- *
- * Each entry in the 'metadata' array may have an arbitrary size.
- *
- * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
- *
- * The return value is zero if the hypercall completed successfully.
- * Note that the return value is _not_ the amount of entries filled
- * out - that is saved in 'done'.
- *
- * If there was an error performing the operation, the return value
- * will contain a negative -EXX type value. The 'done' and 'left'
- * will contain the number of entries that had been successfully
- * retrieved (if any).
- */
-int xc_livepatch_list(xc_interface *xch, const unsigned int max,
-                      const unsigned int start,
-                      struct xen_livepatch_status *info,
-                      char *name, uint32_t *len,
-                      const uint32_t name_total_size,
-                      char *metadata, uint32_t *metadata_len,
-                      const uint32_t metadata_total_size,
-                      unsigned int *done, unsigned int *left);
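Following the description above, a minimal single-pass caller (no retry for stale data, frees omitted) might look like this sketch:

#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

static void list_livepatches(xc_interface *xch)
{
    unsigned int nr = 0, done = 0, left = 0;
    uint32_t name_total = 0, meta_total = 0;

    if ( xc_livepatch_list_get_sizes(xch, &nr, &name_total, &meta_total) || !nr )
        return;

    xen_livepatch_status_t *info = calloc(nr, sizeof(*info));
    char *names = calloc(1, name_total);
    uint32_t *lens = calloc(nr, sizeof(*lens));
    char *meta = calloc(1, meta_total);
    uint32_t *meta_lens = calloc(nr, sizeof(*meta_lens));

    if ( xc_livepatch_list(xch, nr, 0, info, names, lens, name_total,
                           meta, meta_lens, meta_total, &done, &left) == 0 )
        printf("%u payload(s) listed, %u left\n", done, left);
}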
-
-/*
- * The operations are asynchronous and the hypervisor may take a while
- * to complete them. The `timeout` offers an option to expire the
- * operation if it could not be completed within the specified time
- * (in ns). A value of 0 lets the hypervisor decide the best timeout.
- * The `flags` argument allows extra parameters to be passed to the actions.
- */
-int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
-int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
-int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
-int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags);
-
-/*
- * Ensure cache coherency after memory modifications. A call to this function
- * is only required on ARM as the x86 architecture provides cache coherency
- * guarantees. Calling this function on x86 is allowed but has no effect.
- */
-int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
-                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns);
-
-/* Compat shims */
-#include "xenctrl_compat.h"
-
-#endif /* XENCTRL_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/include/xenctrl_compat.h b/tools/libxc/include/xenctrl_compat.h
deleted file mode 100644 (file)
index 464f645..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Compat shims for use by 3rd party consumers of libxenctrl
- * functionality which has been split into separate libraries.
- *
- * New code should use the separate libraries.
- *
- * Each interface must be opted-into separately by defining:
- *
- * XC_WANT_COMPAT_EVTCHN_API
- *  - Functions relating to /dev/xen/evtchn
- */
-#ifndef XENCTRL_COMPAT_H
-#define XENCTRL_COMPAT_H
-
-#ifdef XC_WANT_COMPAT_MAP_FOREIGN_API
-/**
- * Memory maps a range within one domain to a local address range.  Mappings
- * should be unmapped with munmap and should follow the same rules as mmap
- * regarding page alignment.  Returns NULL on failure.
- *
- * @parm xch a handle on an open hypervisor interface
- * @parm dom the domain to map memory from
- * @parm size the amount of memory to map (in multiples of page size)
- * @parm prot same flag as in mmap().
- * @parm mfn the frame address to map.
- */
-void *xc_map_foreign_range(xc_interface *xch, uint32_t dom,
-                            int size, int prot,
-                            unsigned long mfn );
-
-void *xc_map_foreign_pages(xc_interface *xch, uint32_t dom, int prot,
-                           const xen_pfn_t *arr, int num );
-
-/* Nothing within the library other than the compat wrapper itself
- * should be using this; everything inside has access to
- * xenforeignmemory_map().
- */
-#if !defined(XC_INTERNAL_COMPAT_MAP_FOREIGN_API) || \
-     defined(XC_BUILDING_COMPAT_MAP_FOREIGN_API)
-/**
- * Like xc_map_foreign_pages(), except it can succeed partially.
- * When a page cannot be mapped, its respective field in @err is
- * set to the corresponding errno value.
- */
-void *xc_map_foreign_bulk(xc_interface *xch, uint32_t dom, int prot,
-                          const xen_pfn_t *arr, int *err, unsigned int num);
-#endif
-
-#endif
-
-#ifdef XC_WANT_COMPAT_EVTCHN_API
-
-typedef struct xenevtchn_handle xc_evtchn;
-typedef xc_evtchn_port_or_error_t evtchn_port_or_error_t;
-
-xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
-                             unsigned open_flags);
-int xc_evtchn_close(xc_evtchn *xce);
-int xc_evtchn_fd(xc_evtchn *xce);
-int xc_evtchn_notify(xc_evtchn *xce, evtchn_port_t port);
-xc_evtchn_port_or_error_t
-xc_evtchn_bind_unbound_port(xc_evtchn *xce, uint32_t domid);
-xc_evtchn_port_or_error_t
-xc_evtchn_bind_interdomain(xc_evtchn *xce, uint32_t domid,
-                           evtchn_port_t remote_port);
-xc_evtchn_port_or_error_t
-xc_evtchn_bind_virq(xc_evtchn *xce, unsigned int virq);
-int xc_evtchn_unbind(xc_evtchn *xce, evtchn_port_t port);
-xc_evtchn_port_or_error_t
-xc_evtchn_pending(xc_evtchn *xce);
-int xc_evtchn_unmask(xc_evtchn *xce, evtchn_port_t port);
-
-#endif /* XC_WANT_COMPAT_EVTCHN_API */
-
-#ifdef XC_WANT_COMPAT_GNTTAB_API
-
-typedef struct xengntdev_handle xc_gnttab;
-
-xc_gnttab *xc_gnttab_open(xentoollog_logger *logger,
-                          unsigned open_flags);
-int xc_gnttab_close(xc_gnttab *xcg);
-void *xc_gnttab_map_grant_ref(xc_gnttab *xcg,
-                              uint32_t domid,
-                              uint32_t ref,
-                              int prot);
-void *xc_gnttab_map_grant_refs(xc_gnttab *xcg,
-                               uint32_t count,
-                               uint32_t *domids,
-                               uint32_t *refs,
-                               int prot);
-void *xc_gnttab_map_domain_grant_refs(xc_gnttab *xcg,
-                                      uint32_t count,
-                                      uint32_t domid,
-                                      uint32_t *refs,
-                                      int prot);
-void *xc_gnttab_map_grant_ref_notify(xc_gnttab *xcg,
-                                     uint32_t domid,
-                                     uint32_t ref,
-                                     int prot,
-                                     uint32_t notify_offset,
-                                     evtchn_port_t notify_port);
-int xc_gnttab_munmap(xc_gnttab *xcg,
-                     void *start_address,
-                     uint32_t count);
-int xc_gnttab_set_max_grants(xc_gnttab *xcg,
-                             uint32_t count);
-
-typedef struct xengntdev_handle xc_gntshr;
-
-xc_gntshr *xc_gntshr_open(xentoollog_logger *logger,
-                          unsigned open_flags);
-int xc_gntshr_close(xc_gntshr *xcg);
-void *xc_gntshr_share_pages(xc_gntshr *xcg, uint32_t domid,
-                            int count, uint32_t *refs, int writable);
-void *xc_gntshr_share_page_notify(xc_gntshr *xcg, uint32_t domid,
-                                  uint32_t *ref, int writable,
-                                  uint32_t notify_offset,
-                                  evtchn_port_t notify_port);
-int xc_gntshr_munmap(xc_gntshr *xcg, void *start_address, uint32_t count);
-
-#endif /* XC_WANT_COMPAT_GNTTAB_API */
-
-#ifdef XC_WANT_COMPAT_DEVICEMODEL_API
-
-int xc_hvm_create_ioreq_server(
-    xc_interface *xch, uint32_t domid, int handle_bufioreq,
-    ioservid_t *id);
-int xc_hvm_get_ioreq_server_info(
-    xc_interface *xch, uint32_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
-    xen_pfn_t *bufioreq_pfn, evtchn_port_t *bufioreq_port);
-int xc_hvm_map_io_range_to_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
-    uint64_t start, uint64_t end);
-int xc_hvm_unmap_io_range_from_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
-    uint64_t start, uint64_t end);
-int xc_hvm_map_pcidev_to_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
-    uint8_t bus, uint8_t device, uint8_t function);
-int xc_hvm_unmap_pcidev_from_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
-    uint8_t bus, uint8_t device, uint8_t function);
-int xc_hvm_destroy_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id);
-int xc_hvm_set_ioreq_server_state(
-    xc_interface *xch, uint32_t domid, ioservid_t id, int enabled);
-int xc_hvm_set_pci_intx_level(
-    xc_interface *xch, uint32_t domid, uint16_t segment, uint8_t bus,
-    uint8_t device, uint8_t intx, unsigned int level);
-int xc_hvm_set_isa_irq_level(
-    xc_interface *xch, uint32_t domid, uint8_t irq, unsigned int level);
-int xc_hvm_set_pci_link_route(
-    xc_interface *xch, uint32_t domid, uint8_t link, uint8_t irq);
-int xc_hvm_inject_msi(
-    xc_interface *xch, uint32_t domid, uint64_t msi_addr, uint32_t msi_data);
-int xc_hvm_track_dirty_vram(
-    xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr,
-    unsigned long *dirty_bitmap);
-int xc_hvm_modified_memory(
-    xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr);
-int xc_hvm_set_mem_type(
-    xc_interface *xch, uint32_t domid, hvmmem_type_t type,
-    uint64_t first_pfn, uint32_t nr);
-int xc_hvm_inject_trap(
-    xc_interface *xch, uint32_t domid, int vcpu, uint8_t vector,
-    uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2);
-int xc_domain_pin_memory_cacheattr(
-    xc_interface *xch, uint32_t domid, uint64_t start, uint64_t end,
-    uint32_t type);
-
-#endif /* XC_WANT_COMPAT_DEVICEMODEL_API */
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/include/xenctrl_dom.h b/tools/libxc/include/xenctrl_dom.h
deleted file mode 100644 (file)
index 40b85b7..0000000
+++ /dev/null
@@ -1,455 +0,0 @@
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _XC_DOM_H
-#define _XC_DOM_H
-
-#include <xen/libelf/libelf.h>
-
-#define X86_HVM_NR_SPECIAL_PAGES    8
-#define X86_HVM_END_SPECIAL_REGION  0xff000u
-#define XG_MAX_MODULES 2
-
-/* --- typedefs and structs ---------------------------------------- */
-
-typedef uint64_t xen_vaddr_t;
-typedef uint64_t xen_paddr_t;
-
-#define PRIpfn PRI_xen_pfn
-
-struct xc_dom_seg {
-    xen_vaddr_t vstart;
-    xen_vaddr_t vend;
-    xen_pfn_t pfn;
-    xen_pfn_t pages;
-};
-
-struct xc_hvm_firmware_module {
-    uint8_t  *data;
-    uint32_t  length;
-    uint64_t  guest_addr_out;
-};
-
-struct xc_dom_mem {
-    struct xc_dom_mem *next;
-    void *ptr;
-    enum {
-        XC_DOM_MEM_TYPE_MALLOC_INTERNAL,
-        XC_DOM_MEM_TYPE_MALLOC_EXTERNAL,
-        XC_DOM_MEM_TYPE_MMAP,
-    } type;
-    size_t len;
-    unsigned char memory[0];
-};
-
-struct xc_dom_phys {
-    struct xc_dom_phys *next;
-    void *ptr;
-    xen_pfn_t first;
-    xen_pfn_t count;
-};
-
-struct xc_dom_module {
-    void *blob;
-    size_t size;
-    void *cmdline;
-    /* If seg.vstart is non-zero then the module will be loaded at that
-     * address, otherwise it will be placed automatically.
-     *
-     * If automatic placement is used and the module is gzip
-     * compressed then it will be decompressed as it is loaded. If the
-     * module has been explicitly placed then it is loaded as is,
-     * otherwise decompressing risks undoing the manual placement.
-     */
-    struct xc_dom_seg seg;
-};
-
-struct xc_dom_image {
-    /* files */
-    void *kernel_blob;
-    size_t kernel_size;
-    unsigned int num_modules;
-    struct xc_dom_module modules[XG_MAX_MODULES];
-    void *devicetree_blob;
-    size_t devicetree_size;
-
-    size_t max_kernel_size;
-    size_t max_module_size;
-    size_t max_devicetree_size;
-
-    /* arguments and parameters */
-    char *cmdline;
-    size_t cmdline_size;
-    uint32_t f_requested[XENFEAT_NR_SUBMAPS];
-
-    /* info from (elf) kernel image */
-    struct elf_dom_parms parms;
-    char *guest_type;
-
-    /* memory layout */
-    struct xc_dom_seg kernel_seg;
-    struct xc_dom_seg p2m_seg;
-    struct xc_dom_seg pgtables_seg;
-    struct xc_dom_seg devicetree_seg;
-    struct xc_dom_seg start_info_seg;
-    xen_pfn_t start_info_pfn;
-    xen_pfn_t console_pfn;
-    xen_pfn_t xenstore_pfn;
-    xen_pfn_t shared_info_pfn;
-    xen_pfn_t bootstack_pfn;
-    xen_pfn_t pfn_alloc_end;
-    xen_vaddr_t virt_alloc_end;
-    xen_vaddr_t bsd_symtab_start;
-
-    /*
-     * initrd parameters as specified in start_info page
-     * Depending on capabilities of the booted kernel this may be a virtual
-     * address or a pfn. Type is neutral and large enough to hold a virtual
-     * address of a 64 bit kernel even with a 32 bit toolstack.
-     */
-    uint64_t initrd_start;
-    uint64_t initrd_len;
-
-    unsigned int alloc_bootstack;
-    xen_vaddr_t virt_pgtab_end;
-
-    /* other state info */
-    uint32_t f_active[XENFEAT_NR_SUBMAPS];
-
-    /*
-     * pv_p2m is specific to x86 PV guests, and maps GFNs to MFNs.  It is
-     * eventually copied into guest context.
-     */
-    xen_pfn_t *pv_p2m;
-
-    /* physical memory
-     *
-     * An x86 PV guest has one or more blocks of physical RAM,
-     * consisting of total_pages starting at 0. The start address and
-     * size of each block is controlled by vNUMA structures.
-     *
-     * An ARM guest has GUEST_RAM_BANKS regions of RAM, with
-     * rambank_size[i] pages in each. The lowest RAM address
-     * (corresponding to the base of the p2m arrays above) is stored
-     * in rambase_pfn.
-     */
-    xen_pfn_t rambase_pfn;
-    xen_pfn_t total_pages;
-    xen_pfn_t p2m_size;         /* number of pfns covered by p2m */
-    struct xc_dom_phys *phys_pages;
-#if defined (__arm__) || defined(__aarch64__)
-    xen_pfn_t rambank_size[GUEST_RAM_BANKS];
-#endif
-
-    /* malloc memory pool */
-    struct xc_dom_mem *memblocks;
-
-    /* memory footprint stats */
-    size_t alloc_malloc;
-    size_t alloc_mem_map;
-    size_t alloc_file_map;
-    size_t alloc_domU_map;
-
-    /* misc xen domain config stuff */
-    unsigned long flags;
-    unsigned int console_evtchn;
-    unsigned int xenstore_evtchn;
-    uint32_t console_domid;
-    uint32_t xenstore_domid;
-    xen_pfn_t shared_info_mfn;
-
-    xc_interface *xch;
-    uint32_t guest_domid;
-    int claim_enabled; /* 0 by default, 1 enables it */
-
-    int xen_version;
-    xen_capabilities_info_t xen_caps;
-
-    /* kernel loader, arch hooks */
-    struct xc_dom_loader *kernel_loader;
-    void *private_loader;
-
-    /* vNUMA information */
-    xen_vmemrange_t *vmemranges;
-    unsigned int nr_vmemranges;
-    unsigned int *vnode_to_pnode;
-    unsigned int nr_vnodes;
-
-    /* domain type/architecture specific data */
-    void *arch_private;
-
-    /* kernel loader */
-    struct xc_dom_arch *arch_hooks;
-    /* allocate up to pfn_alloc_end */
-    int (*allocate) (struct xc_dom_image * dom);
-
-    /* Container type (HVM or PV). */
-    enum {
-        XC_DOM_PV_CONTAINER,
-        XC_DOM_HVM_CONTAINER,
-    } container_type;
-
-    /* HVM specific fields. */
-    xen_pfn_t target_pages;
-    xen_paddr_t mmio_start;
-    xen_paddr_t mmio_size;
-    xen_paddr_t lowmem_end;
-    xen_paddr_t highmem_end;
-    xen_pfn_t vga_hole_size;
-
-    /* If unset disables the setup of the IOREQ pages. */
-    bool device_model;
-
-    /* BIOS/Firmware passed to HVMLOADER */
-    struct xc_hvm_firmware_module system_firmware_module;
-
-    /* Extra ACPI tables */
-#define MAX_ACPI_MODULES        4
-    struct xc_hvm_firmware_module acpi_modules[MAX_ACPI_MODULES];
-
-    /* Extra SMBIOS structures passed to HVMLOADER */
-    struct xc_hvm_firmware_module smbios_module;
-
-#if defined(__i386__) || defined(__x86_64__)
-    struct e820entry *e820;
-    unsigned int e820_entries;
-#endif
-
-    xen_pfn_t vuart_gfn;
-
-    /* Number of vCPUs */
-    unsigned int max_vcpus;
-};
-
-/* --- pluggable kernel loader ------------------------------------- */
-
-struct xc_dom_loader {
-    char *name;
-    /* Sadly the error returns from these functions are not consistent: */
-    elf_negerrnoval (*probe) (struct xc_dom_image * dom);
-    elf_negerrnoval (*parser) (struct xc_dom_image * dom);
-    elf_errorstatus (*loader) (struct xc_dom_image * dom);
-
-    struct xc_dom_loader *next;
-};
-
-#define __init __attribute__ ((constructor))
-void xc_dom_register_loader(struct xc_dom_loader *loader);
-
-/* --- arch specific hooks ----------------------------------------- */
-
-struct xc_dom_arch {
-    int (*alloc_magic_pages) (struct xc_dom_image * dom);
-
-    /* pagetable setup - x86 PV only */
-    int (*alloc_pgtables) (struct xc_dom_image * dom);
-    int (*alloc_p2m_list) (struct xc_dom_image * dom);
-    int (*setup_pgtables) (struct xc_dom_image * dom);
-
-    /* arch-specific data structs setup */
-    /* in Mini-OS environment start_info might be a macro, avoid collision. */
-#undef start_info
-    int (*start_info) (struct xc_dom_image * dom);
-    int (*shared_info) (struct xc_dom_image * dom, void *shared_info);
-    int (*vcpu) (struct xc_dom_image * dom);
-    int (*bootearly) (struct xc_dom_image * dom);
-    int (*bootlate) (struct xc_dom_image * dom);
-
-    /* arch-specific memory initialization. */
-    int (*meminit) (struct xc_dom_image * dom);
-
-    char *guest_type;
-    char *native_protocol;
-    int page_shift;
-    int sizeof_pfn;
-    int p2m_base_supported;
-    int arch_private_size;
-
-    struct xc_dom_arch *next;
-};
-void xc_dom_register_arch_hooks(struct xc_dom_arch *hooks);
-
-#define XC_DOM_PAGE_SHIFT(dom)  ((dom)->arch_hooks->page_shift)
-#define XC_DOM_PAGE_SIZE(dom)   (1LL << (dom)->arch_hooks->page_shift)
-
-/* --- main functions ---------------------------------------------- */
-
-struct xc_dom_image *xc_dom_allocate(xc_interface *xch,
-                                     const char *cmdline, const char *features);
-void xc_dom_release_phys(struct xc_dom_image *dom);
-void xc_dom_release(struct xc_dom_image *dom);
-int xc_dom_rambase_init(struct xc_dom_image *dom, uint64_t rambase);
-int xc_dom_mem_init(struct xc_dom_image *dom, unsigned int mem_mb);
-
-/* Set this larger if you have enormous modules/kernels. Note that
- * you should trust all kernels not to be maliciously large (e.g. to
- * exhaust all dom0 memory) if you do this (see CVE-2012-4544 /
- * XSA-25). You can also set the default independently for
- * modules/kernels in xc_dom_allocate() or call
- * xc_dom_{kernel,module}_max_size.
- */
-#ifndef XC_DOM_DECOMPRESS_MAX
-#define XC_DOM_DECOMPRESS_MAX (1024*1024*1024) /* 1GB */
-#endif
-
-int xc_dom_kernel_check_size(struct xc_dom_image *dom, size_t sz);
-int xc_dom_kernel_max_size(struct xc_dom_image *dom, size_t sz);
-
-int xc_dom_module_max_size(struct xc_dom_image *dom, size_t sz);
-
-int xc_dom_devicetree_max_size(struct xc_dom_image *dom, size_t sz);
-
-size_t xc_dom_check_gzip(xc_interface *xch,
-                     void *blob, size_t ziplen);
-int xc_dom_do_gunzip(xc_interface *xch,
-                     void *src, size_t srclen, void *dst, size_t dstlen);
-int xc_dom_try_gunzip(struct xc_dom_image *dom, void **blob, size_t * size);
-
-int xc_dom_kernel_file(struct xc_dom_image *dom, const char *filename);
-int xc_dom_module_file(struct xc_dom_image *dom, const char *filename,
-                       const char *cmdline);
-int xc_dom_kernel_mem(struct xc_dom_image *dom, const void *mem,
-                      size_t memsize);
-int xc_dom_module_mem(struct xc_dom_image *dom, const void *mem,
-                       size_t memsize, const char *cmdline);
-int xc_dom_devicetree_file(struct xc_dom_image *dom, const char *filename);
-int xc_dom_devicetree_mem(struct xc_dom_image *dom, const void *mem,
-                          size_t memsize);
-
-int xc_dom_parse_image(struct xc_dom_image *dom);
-int xc_dom_set_arch_hooks(struct xc_dom_image *dom);
-int xc_dom_build_image(struct xc_dom_image *dom);
-
-int xc_dom_boot_xen_init(struct xc_dom_image *dom, xc_interface *xch,
-                         uint32_t domid);
-int xc_dom_boot_mem_init(struct xc_dom_image *dom);
-void *xc_dom_boot_domU_map(struct xc_dom_image *dom, xen_pfn_t pfn,
-                           xen_pfn_t count);
-int xc_dom_boot_image(struct xc_dom_image *dom);
-int xc_dom_compat_check(struct xc_dom_image *dom);
-int xc_dom_gnttab_init(struct xc_dom_image *dom);
-int xc_dom_gnttab_seed(xc_interface *xch, uint32_t guest_domid,
-                       bool is_hvm,
-                       xen_pfn_t console_gfn,
-                       xen_pfn_t xenstore_gfn,
-                       uint32_t console_domid,
-                       uint32_t xenstore_domid);
-bool xc_dom_translated(const struct xc_dom_image *dom);
-
-/* --- debugging bits ---------------------------------------------- */
-
-int xc_dom_loginit(xc_interface *xch);
-
-void xc_dom_printf(xc_interface *xch, const char *fmt, ...)
-     __attribute__ ((format(printf, 2, 3)));
-void xc_dom_panic_func(xc_interface *xch,
-                      const char *file, int line, xc_error_code err,
-                      const char *fmt, ...)
-    __attribute__ ((format(printf, 5, 6)));
-
-#define xc_dom_panic(xch, err, fmt, args...) \
-    xc_dom_panic_func(xch, __FILE__, __LINE__, err, fmt, ## args)
-#define xc_dom_trace(mark) \
-    xc_dom_printf("%s:%d: trace %s\n", __FILE__, __LINE__, mark)
-
-void xc_dom_log_memory_footprint(struct xc_dom_image *dom);
-
-/* --- simple memory pool ------------------------------------------ */
-
-void *xc_dom_malloc(struct xc_dom_image *dom, size_t size);
-int xc_dom_register_external(struct xc_dom_image *dom, void *ptr, size_t size);
-void *xc_dom_malloc_page_aligned(struct xc_dom_image *dom, size_t size);
-void *xc_dom_malloc_filemap(struct xc_dom_image *dom,
-                            const char *filename, size_t * size,
-                            const size_t max_size);
-char *xc_dom_strdup(struct xc_dom_image *dom, const char *str);
-
-/* --- alloc memory pool ------------------------------------------- */
-
-xen_pfn_t xc_dom_alloc_page(struct xc_dom_image *dom, char *name);
-int xc_dom_alloc_segment(struct xc_dom_image *dom,
-                         struct xc_dom_seg *seg, char *name,
-                         xen_vaddr_t start, xen_vaddr_t size);
-
-/* --- misc bits --------------------------------------------------- */
-
-void *xc_dom_pfn_to_ptr(struct xc_dom_image *dom, xen_pfn_t first,
-                        xen_pfn_t count);
-void *xc_dom_pfn_to_ptr_retcount(struct xc_dom_image *dom, xen_pfn_t first,
-                                 xen_pfn_t count, xen_pfn_t *count_out);
-void xc_dom_unmap_one(struct xc_dom_image *dom, xen_pfn_t pfn);
-void xc_dom_unmap_all(struct xc_dom_image *dom);
-
-static inline void *xc_dom_seg_to_ptr_pages(struct xc_dom_image *dom,
-                                      struct xc_dom_seg *seg,
-                                      xen_pfn_t *pages_out)
-{
-    void *retval;
-
-    retval = xc_dom_pfn_to_ptr(dom, seg->pfn, seg->pages);
-
-    *pages_out = retval ? seg->pages : 0;
-    return retval;
-}
-
-static inline void *xc_dom_seg_to_ptr(struct xc_dom_image *dom,
-                                      struct xc_dom_seg *seg)
-{
-    xen_pfn_t dummy;
-
-    return xc_dom_seg_to_ptr_pages(dom, seg, &dummy);
-}
-
-static inline void *xc_dom_vaddr_to_ptr(struct xc_dom_image *dom,
-                                        xen_vaddr_t vaddr,
-                                        size_t *safe_region_out)
-{
-    unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
-    xen_pfn_t page = (vaddr - dom->parms.virt_base) / page_size;
-    unsigned int offset = (vaddr - dom->parms.virt_base) % page_size;
-    xen_pfn_t safe_region_count;
-    void *ptr;
-
-    *safe_region_out = 0;
-    ptr = xc_dom_pfn_to_ptr_retcount(dom, page, 0, &safe_region_count);
-    if ( ptr == NULL )
-        return ptr;
-    *safe_region_out = (safe_region_count << XC_DOM_PAGE_SHIFT(dom)) - offset;
-    return ptr + offset;
-}
-
-static inline xen_pfn_t xc_dom_p2m(struct xc_dom_image *dom, xen_pfn_t pfn)
-{
-    if ( xc_dom_translated(dom) )
-        return pfn;
-
-    /* x86 PV only now. */
-    if ( pfn >= dom->total_pages )
-        return INVALID_MFN;
-
-    return dom->pv_p2m[pfn];
-}
-
-#endif /* _XC_DOM_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
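[Editor's note: the declarations removed above form the xc_dom domain-builder interface that is being relocated. As orientation only -- not part of this commit -- a minimal sketch of how a caller typically drives it for a PV guest follows; the helper name, command line, file paths and error handling are illustrative assumptions, and the header above (plus xenctrl.h) is assumed to be in scope.]

    /* Sketch only: typical call sequence over the xc_dom_* API above. */
    static int build_pv_guest(xc_interface *xch, uint32_t domid,
                              const char *kernel, const char *ramdisk,
                              unsigned int mem_mb)
    {
        struct xc_dom_image *dom;
        int rc = -1;

        dom = xc_dom_allocate(xch, "console=hvc0 root=/dev/xvda1", NULL);
        if ( dom == NULL )
            return -1;

        if ( xc_dom_kernel_file(dom, kernel) != 0 )
            goto out;
        if ( ramdisk != NULL && xc_dom_module_file(dom, ramdisk, NULL) != 0 )
            goto out;

        if ( xc_dom_boot_xen_init(dom, xch, domid) != 0 ||
             xc_dom_parse_image(dom) != 0 ||
             xc_dom_mem_init(dom, mem_mb) != 0 ||
             xc_dom_boot_mem_init(dom) != 0 ||
             xc_dom_build_image(dom) != 0 ||
             xc_dom_boot_image(dom) != 0 )
            goto out;

        rc = 0;
     out:
        xc_dom_release(dom);
        return rc;
    }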
diff --git a/tools/libxc/xc_altp2m.c b/tools/libxc/xc_altp2m.c
deleted file mode 100644 (file)
index 6987c95..0000000
+++ /dev/null
@@ -1,436 +0,0 @@
-/******************************************************************************
- *
- * xc_altp2m.c
- *
- * Interface to altp2m related HVMOPs
- *
- * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-#include <stdbool.h>
-#include <xen/hvm/hvm_op.h>
-
-int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_get_domain_state;
-    arg->domain = dom;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    if ( !rc )
-        *state = arg->u.domain_state.state;
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_set_domain_state;
-    arg->domain = dom;
-    arg->u.domain_state.state = state;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
-                                     uint32_t vcpuid, xen_pfn_t gfn)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_vcpu_enable_notify;
-    arg->domain = domid;
-    arg->u.enable_notify.vcpu_id = vcpuid;
-    arg->u.enable_notify.gfn = gfn;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_set_vcpu_disable_notify(xc_interface *handle, uint32_t domid,
-                                      uint32_t vcpuid)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_vcpu_disable_notify;
-    arg->domain = domid;
-    arg->u.disable_notify.vcpu_id = vcpuid;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
-                          xenmem_access_t default_access, uint16_t *view_id)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_create_p2m;
-    arg->domain = domid;
-    arg->u.view.view = -1;
-    arg->u.view.hvmmem_default_access = default_access;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    if ( !rc )
-        *view_id = arg->u.view.view;
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
-                           uint16_t view_id)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_destroy_p2m;
-    arg->domain = domid;
-    arg->u.view.view = view_id;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-/* Switch all vCPUs of the domain to the specified altp2m view */
-int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
-                             uint16_t view_id)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_switch_p2m;
-    arg->domain = domid;
-    arg->u.view.view = view_id;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
-                              uint16_t view_id, xen_pfn_t gfn, bool *sve)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_get_suppress_ve;
-    arg->domain = domid;
-    arg->u.suppress_ve.view = view_id;
-    arg->u.suppress_ve.gfn = gfn;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    if ( !rc )
-        *sve = arg->u.suppress_ve.suppress_ve;
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
-                              uint16_t view_id, xen_pfn_t gfn, bool sve)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_set_suppress_ve;
-    arg->domain = domid;
-    arg->u.suppress_ve.view = view_id;
-    arg->u.suppress_ve.gfn = gfn;
-    arg->u.suppress_ve.suppress_ve = sve;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
-                                   uint16_t view_id, xen_pfn_t first_gfn,
-                                   xen_pfn_t last_gfn, bool sve,
-                                   xen_pfn_t *error_gfn, int32_t *error_code)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_set_suppress_ve_multi;
-    arg->domain = domid;
-    arg->u.suppress_ve_multi.view = view_id;
-    arg->u.suppress_ve_multi.first_gfn = first_gfn;
-    arg->u.suppress_ve_multi.last_gfn = last_gfn;
-    arg->u.suppress_ve_multi.suppress_ve = sve;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    if ( arg->u.suppress_ve_multi.first_error )
-    {
-        *error_gfn = arg->u.suppress_ve_multi.first_error_gfn;
-        *error_code = arg->u.suppress_ve_multi.first_error;
-    }
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
-                             uint16_t view_id, xen_pfn_t gfn,
-                             xenmem_access_t access)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_set_mem_access;
-    arg->domain = domid;
-    arg->u.mem_access.view = view_id;
-    arg->u.mem_access.access = access;
-    arg->u.mem_access.gfn = gfn;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
-                         uint16_t view_id, xen_pfn_t old_gfn,
-                         xen_pfn_t new_gfn)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_change_gfn;
-    arg->domain = domid;
-    arg->u.change_gfn.view = view_id;
-    arg->u.change_gfn.old_gfn = old_gfn;
-    arg->u.change_gfn.new_gfn = new_gfn;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_set_mem_access_multi(xc_interface *xch, uint32_t domid,
-                                   uint16_t view_id, uint8_t *access,
-                                   uint64_t *gfns, uint32_t nr)
-{
-    int rc;
-
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-    DECLARE_HYPERCALL_BOUNCE(access, nr * sizeof(*access),
-                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    DECLARE_HYPERCALL_BOUNCE(gfns, nr * sizeof(*gfns),
-                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_set_mem_access_multi;
-    arg->domain = domid;
-    arg->u.set_mem_access_multi.view = view_id;
-    arg->u.set_mem_access_multi.nr = nr;
-
-    if ( xc_hypercall_bounce_pre(xch, gfns) ||
-         xc_hypercall_bounce_pre(xch, access) )
-    {
-        PERROR("Could not bounce memory for HVMOP_altp2m_set_mem_access_multi");
-        return -1;
-    }
-
-    set_xen_guest_handle(arg->u.set_mem_access_multi.pfn_list, gfns);
-    set_xen_guest_handle(arg->u.set_mem_access_multi.access_list, access);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-    xc_hypercall_bounce_post(xch, access);
-    xc_hypercall_bounce_post(xch, gfns);
-
-    return rc;
-}
-
-int xc_altp2m_get_mem_access(xc_interface *handle, uint32_t domid,
-                             uint16_t view_id, xen_pfn_t gfn,
-                             xenmem_access_t *access)
-{
-    int rc;
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_get_mem_access;
-    arg->domain = domid;
-    arg->u.mem_access.view = view_id;
-    arg->u.mem_access.gfn = gfn;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                 HYPERCALL_BUFFER_AS_ARG(arg));
-
-    if ( !rc )
-        *access = arg->u.mem_access.access;
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_get_vcpu_p2m_idx(xc_interface *handle, uint32_t domid,
-                               uint32_t vcpuid, uint16_t *altp2m_idx)
-{
-    int rc;
-
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_get_p2m_idx;
-    arg->domain = domid;
-    arg->u.get_vcpu_p2m_idx.vcpu_id = vcpuid;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                 HYPERCALL_BUFFER_AS_ARG(arg));
-    if ( !rc )
-        *altp2m_idx = arg->u.get_vcpu_p2m_idx.altp2m_idx;
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_altp2m_set_visibility(xc_interface *handle, uint32_t domid,
-                             uint16_t view_id, bool visible)
-{
-    int rc;
-
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
-    arg->cmd = HVMOP_altp2m_set_visibility;
-    arg->domain = domid;
-    arg->u.set_visibility.altp2m_idx = view_id;
-    arg->u.set_visibility.visible = visible;
-
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
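[Editor's note: every wrapper in the file removed above follows the same pattern -- allocate a hypercall buffer, fill a xen_hvm_altp2m_op_t, issue HVMOP_altp2m via xencall2(), free the buffer. As an illustration only (the helper and its access policy are assumptions, not part of this change), a caller restricting one guest page in a dedicated view might look like:]

    /* Sketch: enable altp2m, create a view, tighten access on one gfn and
     * switch every vCPU of the domain to the new view. */
    static int restrict_one_page(xc_interface *xch, uint32_t domid,
                                 xen_pfn_t gfn)
    {
        uint16_t view_id;

        if ( xc_altp2m_set_domain_state(xch, domid, true) )
            return -1;

        if ( xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view_id) )
            return -1;

        /* Execute-only in the new view; reads and writes will trap. */
        if ( xc_altp2m_set_mem_access(xch, domid, view_id, gfn,
                                      XENMEM_access_x) )
            return -1;

        return xc_altp2m_switch_to_view(xch, domid, view_id);
    }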
diff --git a/tools/libxc/xc_arinc653.c b/tools/libxc/xc_arinc653.c
deleted file mode 100644 (file)
index 5d61c1a..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/******************************************************************************
- * xc_arinc653.c
- * 
- * XC interface to the ARINC653 scheduler
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright (c) 2010 DornerWorks, Ltd. <DornerWorks.com>
- */
-
-#include "xc_private.h"
-
-int
-xc_sched_arinc653_schedule_set(
-    xc_interface *xch,
-    uint32_t cpupool_id,
-    struct xen_sysctl_arinc653_schedule *schedule)
-{
-    int rc;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(
-        schedule,
-        sizeof(*schedule),
-        XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, schedule) )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_scheduler_op;
-    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
-    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
-    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
-    set_xen_guest_handle(sysctl.u.scheduler_op.u.sched_arinc653.schedule,
-            schedule);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    xc_hypercall_bounce_post(xch, schedule);
-
-    return rc;
-}
-
-int
-xc_sched_arinc653_schedule_get(
-    xc_interface *xch,
-    uint32_t cpupool_id,
-    struct xen_sysctl_arinc653_schedule *schedule)
-{
-    int rc;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(
-        schedule,
-        sizeof(*schedule),
-        XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, schedule) )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_scheduler_op;
-    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
-    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
-    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_getinfo;
-    set_xen_guest_handle(sysctl.u.scheduler_op.u.sched_arinc653.schedule,
-            schedule);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    xc_hypercall_bounce_post(xch, schedule);
-
-    return rc;
-}
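[Editor's note: xc_sched_arinc653_schedule_set() above bounces the caller's schedule into hypercall-safe memory and hands it to XEN_SYSCTL_scheduler_op. A hedged sketch of filling in a one-entry schedule follows; field names are taken from xen_sysctl_arinc653_schedule in the public sysctl interface, and the 10ms frame is an arbitrary example value.]

    /* Sketch: give a single domain the whole 10ms major frame on cpupool 0. */
    static int set_one_entry_schedule(xc_interface *xch,
                                      const xen_domain_handle_t handle)
    {
        struct xen_sysctl_arinc653_schedule sched = { 0 };

        sched.major_frame = 10000000ULL;            /* nanoseconds */
        sched.num_sched_entries = 1;
        memcpy(sched.sched_entries[0].dom_handle, handle,
               sizeof(sched.sched_entries[0].dom_handle));
        sched.sched_entries[0].vcpu_id = 0;
        sched.sched_entries[0].runtime = 10000000ULL;

        return xc_sched_arinc653_schedule_set(xch, 0, &sched);
    }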
diff --git a/tools/libxc/xc_bitops.h b/tools/libxc/xc_bitops.h
deleted file mode 100644 (file)
index 0951e82..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef XC_BITOPS_H
-#define XC_BITOPS_H 1
-
-/* bitmap operations for single threaded access */
-
-#include <stdlib.h>
-#include <string.h>
-
-/* Needed by several includees, but no longer used for bitops. */
-#define BITS_PER_LONG (sizeof(unsigned long) * 8)
-#define ORDER_LONG (sizeof(unsigned long) == 4 ? 5 : 6)
-
-#define BITMAP_ENTRY(_nr,_bmap) ((_bmap))[(_nr) / 8]
-#define BITMAP_SHIFT(_nr) ((_nr) % 8)
-
-/* number of bytes required to hold nr_bits bits */
-static inline int bitmap_size(int nr_bits)
-{
-    return (nr_bits + 7) / 8;
-}
-
-static inline void *bitmap_alloc(int nr_bits)
-{
-    return calloc(1, bitmap_size(nr_bits));
-}
-
-static inline void bitmap_set(void *addr, int nr_bits)
-{
-    memset(addr, 0xff, bitmap_size(nr_bits));
-}
-
-static inline void bitmap_clear(void *addr, int nr_bits)
-{
-    memset(addr, 0, bitmap_size(nr_bits));
-}
-
-static inline int test_bit(int nr, const void *_addr)
-{
-    const char *addr = _addr;
-    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
-}
-
-static inline void clear_bit(int nr, void *_addr)
-{
-    char *addr = _addr;
-    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
-}
-
-static inline void set_bit(int nr, void *_addr)
-{
-    char *addr = _addr;
-    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
-}
-
-static inline int test_and_clear_bit(int nr, void *addr)
-{
-    int oldbit = test_bit(nr, addr);
-    clear_bit(nr, addr);
-    return oldbit;
-}
-
-static inline int test_and_set_bit(int nr, void *addr)
-{
-    int oldbit = test_bit(nr, addr);
-    set_bit(nr, addr);
-    return oldbit;
-}
-
-static inline void bitmap_or(void *_dst, const void *_other,
-                             int nr_bits)
-{
-    char *dst = _dst;
-    const char *other = _other;
-    int i;
-    for ( i = 0; i < bitmap_size(nr_bits); ++i )
-        dst[i] |= other[i];
-}
-
-#endif  /* XC_BITOPS_H */
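[Editor's note: the helpers in the header removed above deliberately avoid atomics (single-threaded use only) and work at byte granularity, so a bitmap for nr_bits occupies (nr_bits + 7) / 8 bytes. A small usage sketch, with illustrative names and an assumed nr_pfns > 3:]

    /* Sketch: single-threaded tracking of which pfns were already dumped. */
    static void demo_bitmap(int nr_pfns)
    {
        void *seen = bitmap_alloc(nr_pfns);    /* zero-filled on success */

        if ( seen == NULL )
            return;

        set_bit(3, seen);
        if ( test_and_set_bit(3, seen) )       /* old value was 1 */
            clear_bit(3, seen);

        free(seen);
    }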
diff --git a/tools/libxc/xc_core.c b/tools/libxc/xc_core.c
deleted file mode 100644 (file)
index e8c6fb9..0000000
+++ /dev/null
@@ -1,1008 +0,0 @@
-/*
- * Elf format, (pfn, gmfn) table, IA64 support.
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * xen dump-core file format follows ELF format specification.
- * Analysis tools shouldn't depend on the order of sections.
- * They should follow the elf header and check section names.
- *
- *  +--------------------------------------------------------+
- *  |ELF header                                              |
- *  +--------------------------------------------------------+
- *  |section headers                                         |
- *  |    null section header                                 |
- *  |    .shstrtab                                           |
- *  |    .note.Xen                                           |
- *  |    .xen_prstatus                                       |
- *  |    .xen_shared_info if present                         |
- *  |    .xen_pages                                          |
- *  |    .xen_p2m or .xen_pfn                                |
- *  +--------------------------------------------------------+
- *  |.note.Xen:note section                                  |
- *  |    "Xen" is used as note name,                         |
- *  |    types are defined in xen/include/public/elfnote.h   |
- *  |    and descriptors are defined in xc_core.h.           |
- *  |    dumpcore none                                       |
- *  |    dumpcore header                                     |
- *  |    dumpcore xen version                                |
- *  |    dumpcore format version                             |
- *  +--------------------------------------------------------+
- *  |.xen_prstatus                                           |
- *  |       vcpu_guest_context_t[nr_vcpus]                   |
- *  +--------------------------------------------------------+
- *  |.xen_shared_info if possible                            |
- *  +--------------------------------------------------------+
- *  |.xen_pages                                              |
- *  |    page * nr_pages                                     |
- *  +--------------------------------------------------------+
- *  |.xen_p2m or .xen_pfn                                    |
- *  |    .xen_p2m: struct xen_dumpcore_p2m[nr_pages]         |
- *  |    .xen_pfn: uint64_t[nr_pages]                        |
- *  +--------------------------------------------------------+
- *  |.shstrtab: section header string table                  |
- *  +--------------------------------------------------------+
- *
- */
-
-#include "xc_private.h"
-#include "xc_core.h"
-#include <stdlib.h>
-#include <unistd.h>
-
-#include <xen/libelf/libelf.h>
-
-/* number of pages to write at a time */
-#define DUMP_INCREMENT (4 * 1024)
-
-/* string table */
-struct xc_core_strtab {
-    char       *strings;
-    uint16_t    length;
-    uint16_t    max;
-};
-
-static struct xc_core_strtab*
-xc_core_strtab_init(xc_interface *xch)
-{
-    struct xc_core_strtab *strtab;
-    char *strings;
-    strtab = malloc(sizeof(*strtab));
-    if ( strtab == NULL )
-        return NULL;
-
-    strings = malloc(PAGE_SIZE);
-    if ( strings == NULL )
-    {
-        PERROR("Could not allocate string table init");
-        free(strtab);
-        return NULL;
-    }
-    strtab->strings = strings;
-    strtab->max = PAGE_SIZE;
-
-    /* index 0 represents none */
-    strtab->strings[0] = '\0';
-    strtab->length = 1;
-
-    return strtab;
-}
-
-static void
-xc_core_strtab_free(struct xc_core_strtab *strtab)
-{
-    free(strtab->strings);
-    free(strtab);
-}
-
-static uint16_t
-xc_core_strtab_get(xc_interface *xch, struct xc_core_strtab *strtab, const char *name)
-{
-    uint16_t ret = 0;
-    uint16_t len = strlen(name) + 1;
-
-    if ( strtab->length > UINT16_MAX - len )
-    {
-        PERROR("too long string table");
-        errno = E2BIG;
-        return ret;
-    }
-    
-    if ( strtab->length + len > strtab->max )
-    {
-        char *tmp;
-        if ( strtab->max > UINT16_MAX / 2 )
-        {
-            PERROR("too long string table");
-            errno = ENOMEM;
-            return ret;
-        }
-
-        tmp = realloc(strtab->strings, strtab->max * 2);
-        if ( tmp == NULL )
-        {
-            PERROR("Could not allocate string table");
-            return ret;
-        }
-
-        strtab->strings = tmp;
-        strtab->max *= 2;
-    }
-
-    ret = strtab->length;
-    strcpy(strtab->strings + strtab->length, name);
-    strtab->length += len;
-    return ret;
-}
-
-
-/* section headers */
-struct xc_core_section_headers {
-    uint16_t    num;
-    uint16_t    num_max;
-
-    Elf64_Shdr  *shdrs;
-};
-#define SHDR_INIT       ((uint16_t)16)
-#define SHDR_INC        ((uint16_t)4)
-
-static struct xc_core_section_headers*
-xc_core_shdr_init(xc_interface *xch)
-{
-    struct xc_core_section_headers *sheaders;
-    sheaders = malloc(sizeof(*sheaders));
-    if ( sheaders == NULL )
-        return NULL;
-
-    sheaders->num = 0;
-    sheaders->num_max = SHDR_INIT;
-    sheaders->shdrs = malloc(sizeof(sheaders->shdrs[0]) * sheaders->num_max);
-    if ( sheaders->shdrs == NULL )
-    {
-        free(sheaders);
-        return NULL;
-    }
-    return sheaders;
-}
-
-static void
-xc_core_shdr_free(struct xc_core_section_headers *sheaders)
-{
-    free(sheaders->shdrs);
-    free(sheaders);
-}
-
-Elf64_Shdr*
-xc_core_shdr_get(xc_interface *xch,
-                 struct xc_core_section_headers *sheaders)
-{
-    Elf64_Shdr *shdr;
-
-    if ( sheaders->num == sheaders->num_max )
-    {
-        Elf64_Shdr *shdrs;
-        if ( sheaders->num_max > UINT16_MAX - SHDR_INC )
-        {
-            errno = E2BIG;
-            return NULL;
-        }
-        sheaders->num_max += SHDR_INC;
-        shdrs = realloc(sheaders->shdrs,
-                        sizeof(sheaders->shdrs[0]) * sheaders->num_max);
-        if ( shdrs == NULL )
-            return NULL;
-        sheaders->shdrs = shdrs;
-    }
-
-    shdr = &sheaders->shdrs[sheaders->num];
-    sheaders->num++;
-    memset(shdr, 0, sizeof(*shdr));
-    return shdr;
-}
-
-int
-xc_core_shdr_set(xc_interface *xch,
-                 Elf64_Shdr *shdr,
-                 struct xc_core_strtab *strtab,
-                 const char *name, uint32_t type,
-                 uint64_t offset, uint64_t size,
-                 uint64_t addralign, uint64_t entsize)
-{
-    uint64_t name_idx = xc_core_strtab_get(xch, strtab, name);
-    if ( name_idx == 0 )
-        return -1;
-
-    shdr->sh_name = name_idx;
-    shdr->sh_type = type;
-    shdr->sh_offset = offset;
-    shdr->sh_size = size;
-    shdr->sh_addralign = addralign;
-    shdr->sh_entsize = entsize;
-    return 0;
-}
-
-static void
-xc_core_ehdr_init(Elf64_Ehdr *ehdr)
-{
-    memset(ehdr, 0, sizeof(*ehdr));
-    ehdr->e_ident[EI_MAG0] = ELFMAG0;
-    ehdr->e_ident[EI_MAG1] = ELFMAG1;
-    ehdr->e_ident[EI_MAG2] = ELFMAG2;
-    ehdr->e_ident[EI_MAG3] = ELFMAG3;
-    ehdr->e_ident[EI_CLASS] = ELFCLASS64;
-    ehdr->e_ident[EI_DATA] = ELF_ARCH_DATA;
-    ehdr->e_ident[EI_VERSION] = EV_CURRENT;
-    ehdr->e_ident[EI_OSABI] = ELFOSABI_SYSV;
-    ehdr->e_ident[EI_ABIVERSION] = EV_CURRENT;
-
-    ehdr->e_type = ET_CORE;
-    /* e_machine will be filled in later */
-    ehdr->e_version = EV_CURRENT;
-    ehdr->e_entry = 0;
-    ehdr->e_phoff = 0;
-    ehdr->e_shoff = sizeof(*ehdr);
-    ehdr->e_flags = ELF_CORE_EFLAGS;
-    ehdr->e_ehsize = sizeof(*ehdr);
-    ehdr->e_phentsize = sizeof(Elf64_Phdr);
-    ehdr->e_phnum = 0;
-    ehdr->e_shentsize = sizeof(Elf64_Shdr);
-    /* ehdr->e_shnum and ehdr->e_shstrndx aren't known here yet;
-     * fill them in later. */
-}
-
-static int
-elfnote_fill_xen_version(xc_interface *xch,
-                         struct xen_dumpcore_elfnote_xen_version_desc
-                         *xen_version)
-{
-    int rc;
-    memset(xen_version, 0, sizeof(*xen_version));
-
-    rc = xc_version(xch, XENVER_version, NULL);
-    if ( rc < 0 )
-        return rc;
-    xen_version->major_version = rc >> 16;
-    xen_version->minor_version = rc & ((1 << 16) - 1);
-
-    rc = xc_version(xch, XENVER_extraversion,
-                    &xen_version->extra_version);
-    if ( rc < 0 )
-        return rc;
-
-    rc = xc_version(xch, XENVER_compile_info,
-                    &xen_version->compile_info);
-    if ( rc < 0 )
-        return rc;
-
-    rc = xc_version(xch,
-                    XENVER_capabilities, &xen_version->capabilities);
-    if ( rc < 0 )
-        return rc;
-
-    rc = xc_version(xch, XENVER_changeset, &xen_version->changeset);
-    if ( rc < 0 )
-        return rc;
-
-    rc = xc_version(xch, XENVER_platform_parameters,
-                    &xen_version->platform_parameters);
-    if ( rc < 0 )
-        return rc;
-
-    rc = xc_version(xch, XENVER_pagesize, NULL);
-    if ( rc < 0 )
-        return rc;
-    xen_version->pagesize = rc;
-
-    return 0;
-}
-
-static void
-elfnote_fill_format_version(struct xen_dumpcore_elfnote_format_version_desc
-                            *format_version)
-{
-    format_version->version = XEN_DUMPCORE_FORMAT_VERSION_CURRENT;
-}
-
-static void
-elfnote_init(struct elfnote *elfnote)
-{
-    /* elf note section */
-    memset(elfnote, 0, sizeof(*elfnote));
-    elfnote->namesz = strlen(XEN_DUMPCORE_ELFNOTE_NAME) + 1;
-    strncpy(elfnote->name, XEN_DUMPCORE_ELFNOTE_NAME, sizeof(elfnote->name));
-}
-
-static int
-elfnote_dump_none(xc_interface *xch, void *args, dumpcore_rtn_t dump_rtn)
-{
-    int sts;
-    struct elfnote elfnote;
-    struct xen_dumpcore_elfnote_none_desc none;
-
-    elfnote_init(&elfnote);
-    /* Avoid compile warning about constant-zero-sized memset(). */
-    /*memset(&none, 0, sizeof(none));*/
-
-    elfnote.descsz = sizeof(none);
-    elfnote.type = XEN_ELFNOTE_DUMPCORE_NONE;
-    sts = dump_rtn(xch, args, (char*)&elfnote, sizeof(elfnote));
-    if ( sts != 0 )
-        return sts;
-    return dump_rtn(xch, args, (char*)&none, sizeof(none));
-}
-
-static int
-elfnote_dump_core_header(
-    xc_interface *xch,
-    void *args, dumpcore_rtn_t dump_rtn, const xc_dominfo_t *info,
-    int nr_vcpus, unsigned long nr_pages)
-{
-    int sts;
-    struct elfnote elfnote;
-    struct xen_dumpcore_elfnote_header_desc header;
-    
-    elfnote_init(&elfnote);
-    memset(&header, 0, sizeof(header));
-    
-    elfnote.descsz = sizeof(header);
-    elfnote.type = XEN_ELFNOTE_DUMPCORE_HEADER;
-    header.xch_magic = info->hvm ? XC_CORE_MAGIC_HVM : XC_CORE_MAGIC;
-    header.xch_nr_vcpus = nr_vcpus;
-    header.xch_nr_pages = nr_pages;
-    header.xch_page_size = PAGE_SIZE;
-    sts = dump_rtn(xch, args, (char*)&elfnote, sizeof(elfnote));
-    if ( sts != 0 )
-        return sts;
-    return dump_rtn(xch, args, (char*)&header, sizeof(header));
-}
-
-static int
-elfnote_dump_xen_version(xc_interface *xch, void *args,
-                         dumpcore_rtn_t dump_rtn, unsigned int guest_width)
-{
-    int sts;
-    struct elfnote elfnote;
-    struct xen_dumpcore_elfnote_xen_version_desc xen_version;
-
-    elfnote_init(&elfnote);
-    memset(&xen_version, 0, sizeof(xen_version));
-
-    elfnote.descsz = sizeof(xen_version);
-    elfnote.type = XEN_ELFNOTE_DUMPCORE_XEN_VERSION;
-    elfnote_fill_xen_version(xch, &xen_version);
-    if (guest_width < sizeof(unsigned long))
-    {
-        // 32 bit elf file format differs in pagesize's alignment
-        char *p = (char *)&xen_version.pagesize;
-        memmove(p - 4, p, sizeof(xen_version.pagesize));
-    }
-    sts = dump_rtn(xch, args, (char*)&elfnote, sizeof(elfnote));
-    if ( sts != 0 )
-        return sts;
-    return dump_rtn(xch, args, (char*)&xen_version, sizeof(xen_version));
-}
-
-static int
-elfnote_dump_format_version(xc_interface *xch,
-                            void *args, dumpcore_rtn_t dump_rtn)
-{
-    int sts;
-    struct elfnote elfnote;
-    struct xen_dumpcore_elfnote_format_version_desc format_version;
-
-    elfnote_init(&elfnote);
-    memset(&format_version, 0, sizeof(format_version));
-    
-    elfnote.descsz = sizeof(format_version);
-    elfnote.type = XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION;
-    elfnote_fill_format_version(&format_version);
-    sts = dump_rtn(xch, args, (char*)&elfnote, sizeof(elfnote));
-    if ( sts != 0 )
-        return sts;
-    return dump_rtn(xch, args, (char*)&format_version, sizeof(format_version));
-}
-
-int
-xc_domain_dumpcore_via_callback(xc_interface *xch,
-                                uint32_t domid,
-                                void *args,
-                                dumpcore_rtn_t dump_rtn)
-{
-    xc_dominfo_t info;
-    shared_info_any_t *live_shinfo = NULL;
-    struct domain_info_context _dinfo = {};
-    struct domain_info_context *dinfo = &_dinfo;
-
-    int nr_vcpus = 0;
-    char *dump_mem, *dump_mem_start = NULL;
-    vcpu_guest_context_any_t *ctxt = NULL;
-    struct xc_core_arch_context arch_ctxt;
-    char dummy[PAGE_SIZE];
-    int dummy_len;
-    int sts = -1;
-
-    unsigned long i;
-    unsigned long j;
-    unsigned long nr_pages;
-
-    xc_core_memory_map_t *memory_map = NULL;
-    unsigned int nr_memory_map;
-    unsigned int map_idx;
-
-    int auto_translated_physmap;
-    xen_pfn_t *p2m = NULL;
-    struct xen_dumpcore_p2m *p2m_array = NULL;
-
-    uint64_t *pfn_array = NULL;
-
-    Elf64_Ehdr ehdr;
-    uint64_t filesz;
-    uint64_t offset;
-    uint64_t fixup;
-
-    struct xc_core_strtab *strtab = NULL;
-    uint16_t strtab_idx;
-    struct xc_core_section_headers *sheaders = NULL;
-    Elf64_Shdr *shdr;
-    xc_core_arch_context_init(&arch_ctxt);
-    if ( (dump_mem_start = malloc(DUMP_INCREMENT*PAGE_SIZE)) == NULL )
-    {
-        PERROR("Could not allocate dump_mem");
-        goto out;
-    }
-
-    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
-    {
-        PERROR("Could not get info for domain");
-        goto out;
-    }
-    /* Map the shared info frame */
-    live_shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
-                                       PROT_READ, info.shared_info_frame);
-    if ( !live_shinfo && !info.hvm )
-    {
-        PERROR("Couldn't map live_shinfo");
-        goto out;
-    }
-    auto_translated_physmap = xc_core_arch_auto_translated_physmap(&info);
-
-    if ( !auto_translated_physmap )
-
-    {
-        if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
-        {
-            PERROR("Could not get address size for domain");
-            goto out;
-        }
-    }
-    else
-    {
-        /*
-         * Autotranslated guest never sets guest width in the first
-         * place. Force guest_width to be sizeof(unsigned long) so
-         * code below functions properly.
-         *
-         * Here is why this is correct.
-         *
-         * 1. Before f969bc9fc, xc_domain_get_guest_width for HVM (x86
-         * and ARM) always returned hypervisor's idea of
-         * sizeof(unsigned long).
-         *
-         * 2. There has never been a situation in which hypervisor's
-         * word width is smaller than toolstack domain's (i.e. no
-         * 32bit hypervisor + 64bit toolstack).
-         *
-         * Predicates in code test guest_width against toolstack
-         * domain's sizeof(unsigned long), so setting guest_width to
-         * toolstack domain's idea of sizeof(unsigned long) matches
-         * the original behaviour for HVM guests.
-         */
-        dinfo->guest_width = sizeof(unsigned long);
-    }
-
-    if ( domid != info.domid )
-    {
-        PERROR("Domain %d does not exist", domid);
-        goto out;
-    }
-
-    ctxt = calloc(sizeof(*ctxt), info.max_vcpu_id + 1);
-    if ( !ctxt )
-    {
-        PERROR("Could not allocate vcpu context array");
-        goto out;
-    }
-
-    for ( i = 0; i <= info.max_vcpu_id; i++ )
-    {
-        if ( xc_vcpu_getcontext(xch, domid, i, &ctxt[nr_vcpus]) == 0 )
-        {
-            if ( xc_core_arch_context_get(&arch_ctxt, &ctxt[nr_vcpus],
-                                          xch, domid) )
-                continue;
-            nr_vcpus++;
-        }
-    }
-    if ( nr_vcpus == 0 )
-    {
-        PERROR("No VCPU context could be grabbed");
-        goto out;
-    }
-
-    /* obtain memory map */
-    sts = xc_core_arch_memory_map_get(xch, &arch_ctxt, &info,
-                                      live_shinfo, &memory_map,
-                                      &nr_memory_map);
-    if ( sts != 0 )
-        goto out;
-
-    /*
-     * Note: this is the *current* number of pages and may change under
-     * a live dump-core.  We'll just take this value, and if more pages
-     * exist, we'll skip them.  If there are fewer, we just won't use
-     * all of the array...
-     *
-     * We don't want to use the total potential size of the memory map
-     * since that is usually much higher than info.nr_pages.
-     */
-    nr_pages = info.nr_pages;
-
-    if ( !auto_translated_physmap )
-    {
-        /* obtain p2m table */
-        p2m_array = malloc(nr_pages * sizeof(p2m_array[0]));
-        if ( p2m_array == NULL )
-        {
-            PERROR("Could not allocate p2m array");
-            goto out;
-        }
-
-        sts = xc_core_arch_map_p2m(xch, dinfo->guest_width, &info, live_shinfo,
-                                   &p2m, &dinfo->p2m_size);
-        if ( sts != 0 )
-            goto out;
-    }
-    else
-    {
-        pfn_array = malloc(nr_pages * sizeof(pfn_array[0]));
-        if ( pfn_array == NULL )
-        {
-            PERROR("Could not allocate pfn array");
-            goto out;
-        }
-    }
-
-    /* ehdr.e_shnum and ehdr.e_shstrndx aren't known here yet; fill them in later. */
-    xc_core_ehdr_init(&ehdr);
-
-    /* create section header */
-    strtab = xc_core_strtab_init(xch);
-    if ( strtab == NULL )
-    {
-        PERROR("Could not allocate string table");
-        goto out;
-    }
-    sheaders = xc_core_shdr_init(xch);
-    if ( sheaders == NULL )
-    {
-        PERROR("Could not allocate section headers");
-        goto out;
-    }
-    /* null section */
-    shdr = xc_core_shdr_get(xch,sheaders);
-    if ( shdr == NULL )
-    {
-        PERROR("Could not get section header for null section");
-        goto out;
-    }
-
-    /* .shstrtab */
-    shdr = xc_core_shdr_get(xch,sheaders);
-    if ( shdr == NULL )
-    {
-        PERROR("Could not get section header for shstrtab");
-        goto out;
-    }
-    strtab_idx = shdr - sheaders->shdrs;
-    /* strtab_shdr.sh_offset and strtab_shdr.sh_size aren't known yet;
-     * fill them in later.
-     */
-    sts = xc_core_shdr_set(xch, shdr, strtab, ELF_SHSTRTAB, SHT_STRTAB, 0, 0, 0, 0);
-    if ( sts != 0 )
-        goto out;
-
-    /* elf note section */
-    /* here the number of section header is unknown. fix up offset later. */
-    offset = sizeof(ehdr);
-    filesz =
-        sizeof(struct xen_dumpcore_elfnote_none) +         /* none */
-        sizeof(struct xen_dumpcore_elfnote_header) +       /* core header */
-        sizeof(struct xen_dumpcore_elfnote_xen_version) +  /* xen version */
-        sizeof(struct xen_dumpcore_elfnote_format_version);/* format version */
-    shdr = xc_core_shdr_get(xch,sheaders);
-    if ( shdr == NULL )
-    {
-        PERROR("Could not get section header for note section");
-        goto out;
-    }
-    sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_NOTE, SHT_NOTE,
-                           offset, filesz, 0, 0);
-    if ( sts != 0 )
-        goto out;
-    offset += filesz;
-
-    /* prstatus */
-    shdr = xc_core_shdr_get(xch,sheaders);
-    if ( shdr == NULL )
-    {
-        PERROR("Could not get section header for .xen_prstatus");
-        goto out;
-    }
-    filesz = sizeof(*ctxt) * nr_vcpus;
-    sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_PRSTATUS,
-                           SHT_PROGBITS, offset, filesz,
-                           __alignof__(*ctxt), sizeof(*ctxt));
-    if ( sts != 0 )
-        goto out;
-    offset += filesz;
-
-    /* arch context */
-    sts = xc_core_arch_context_get_shdr(xch, &arch_ctxt, sheaders, strtab,
-                                        &filesz, offset);
-    if ( sts != 0 )
-        goto out;
-    offset += filesz;
-
-    /* shared_info */
-    if ( live_shinfo != NULL )
-    {
-        shdr = xc_core_shdr_get(xch,sheaders);
-        if ( shdr == NULL )
-        {
-            PERROR("Could not get section header for .xen_shared_info");
-            goto out;
-        }
-        filesz = PAGE_SIZE;
-        sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_SHARED_INFO,
-                               SHT_PROGBITS, offset, filesz,
-                               __alignof__(*live_shinfo), PAGE_SIZE);
-        if ( sts != 0 )
-            goto out;
-        offset += filesz;
-    }
-
-    /*
-     * pages and p2m/pfn are the last section to allocate section headers
-     * so that we know the number of section headers here.
-     * 2 = pages section and p2m/pfn table section
-     */
-    fixup = (sheaders->num + 2) * sizeof(*shdr);
-    /* zeroth section should have zero offset */
-    for ( i = 1; i < sheaders->num; i++ )
-        sheaders->shdrs[i].sh_offset += fixup;
-    offset += fixup;
-    dummy_len = ROUNDUP(offset, PAGE_SHIFT) - offset; /* padding length */
-    offset += dummy_len;
-
-    /* pages */
-    shdr = xc_core_shdr_get(xch,sheaders);
-    if ( shdr == NULL )
-    {
-        PERROR("could not get section headers for .xen_pages");
-        goto out;
-    }
-    filesz = (uint64_t)nr_pages * PAGE_SIZE;
-    sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_PAGES, SHT_PROGBITS,
-                           offset, filesz, PAGE_SIZE, PAGE_SIZE);
-    if ( sts != 0 )
-        goto out;
-    offset += filesz;
-
-    /* p2m/pfn table */
-    shdr = xc_core_shdr_get(xch,sheaders);
-    if ( shdr == NULL )
-    {
-        PERROR("Could not get section header for .xen_{p2m, pfn} table");
-        goto out;
-    }
-    if ( !auto_translated_physmap )
-    {
-        filesz = (uint64_t)nr_pages * sizeof(p2m_array[0]);
-        sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_P2M,
-                               SHT_PROGBITS,
-                               offset, filesz, __alignof__(p2m_array[0]),
-                               sizeof(p2m_array[0]));
-    }
-    else
-    {
-        filesz = (uint64_t)nr_pages * sizeof(pfn_array[0]);
-        sts = xc_core_shdr_set(xch, shdr, strtab, XEN_DUMPCORE_SEC_PFN,
-                               SHT_PROGBITS,
-                               offset, filesz, __alignof__(pfn_array[0]),
-                               sizeof(pfn_array[0]));
-    }
-    if ( sts != 0 )
-        goto out;
-    offset += filesz;
-
-    /* fixing up section header string table section header */
-    filesz = strtab->length;
-    sheaders->shdrs[strtab_idx].sh_offset = offset;
-    sheaders->shdrs[strtab_idx].sh_size = filesz;
-
-    /* write out elf header */
-    ehdr.e_shnum = sheaders->num;
-    ehdr.e_shstrndx = strtab_idx;
-    ehdr.e_machine = ELF_ARCH_MACHINE;
-    sts = dump_rtn(xch, args, (char*)&ehdr, sizeof(ehdr));
-    if ( sts != 0 )
-        goto out;
-
-    /* section headers */
-    sts = dump_rtn(xch, args, (char*)sheaders->shdrs,
-                   sheaders->num * sizeof(sheaders->shdrs[0]));
-    if ( sts != 0 )
-        goto out;
-
-    /* elf note section: dumpcore none note */
-    sts = elfnote_dump_none(xch, args, dump_rtn);
-    if ( sts != 0 )
-        goto out;
-
-    /* elf note section: xen core header */
-    sts = elfnote_dump_core_header(xch, args, dump_rtn, &info, nr_vcpus, nr_pages);
-    if ( sts != 0 )
-        goto out;
-
-    /* elf note section: xen version */
-    sts = elfnote_dump_xen_version(xch, args, dump_rtn, dinfo->guest_width);
-    if ( sts != 0 )
-        goto out;
-
-    /* elf note section: format version */
-    sts = elfnote_dump_format_version(xch, args, dump_rtn);
-    if ( sts != 0 )
-        goto out;
-
-    /* prstatus: .xen_prstatus */
-    sts = dump_rtn(xch, args, (char *)ctxt, sizeof(*ctxt) * nr_vcpus);
-    if ( sts != 0 )
-        goto out;
-
-    if ( live_shinfo != NULL )
-    {
-        /* shared_info: .xen_shared_info */
-        sts = dump_rtn(xch, args, (char*)live_shinfo, PAGE_SIZE);
-        if ( sts != 0 )
-            goto out;
-    }
-
-    /* arch specific context */
-    sts = xc_core_arch_context_dump(xch, &arch_ctxt, args, dump_rtn);
-    if ( sts != 0 )
-        goto out;
-
-    /* Pad the output data to page alignment. */
-    memset(dummy, 0, PAGE_SIZE);
-    sts = dump_rtn(xch, args, dummy, dummy_len);
-    if ( sts != 0 )
-        goto out;
-
-    /* dump pages: .xen_pages */
-    j = 0;
-    dump_mem = dump_mem_start;
-    for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
-    {
-        uint64_t pfn_start;
-        uint64_t pfn_end;
-
-        pfn_start = memory_map[map_idx].addr >> PAGE_SHIFT;
-        pfn_end = pfn_start + (memory_map[map_idx].size >> PAGE_SHIFT);
-        for ( i = pfn_start; i < pfn_end; i++ )
-        {
-            uint64_t gmfn;
-            void *vaddr;
-            
-            if ( j >= nr_pages )
-            {
-                /*
-                 * When live dump-mode (-L option) is specified,
-                 * guest domain may increase memory.
-                 */
-                IPRINTF("exceeded nr_pages (%ld) losing pages", nr_pages);
-                goto copy_done;
-            }
-
-            if ( !auto_translated_physmap )
-            {
-                if ( dinfo->guest_width >= sizeof(unsigned long) )
-                {
-                    if ( dinfo->guest_width == sizeof(unsigned long) )
-                        gmfn = p2m[i];
-                    else
-                        gmfn = ((uint64_t *)p2m)[i];
-                    if ( gmfn == INVALID_PFN )
-                        continue;
-                }
-                else
-                {
-                    gmfn = ((uint32_t *)p2m)[i];
-                    if ( gmfn == (uint32_t)INVALID_PFN )
-                       continue;
-                }
-
-                p2m_array[j].pfn = i;
-                p2m_array[j].gmfn = gmfn;
-            }
-            else
-            {
-                if ( !xc_core_arch_gpfn_may_present(&arch_ctxt, i) )
-                    continue;
-
-                gmfn = i;
-                pfn_array[j] = i;
-            }
-
-            vaddr = xc_map_foreign_range(
-                xch, domid, PAGE_SIZE, PROT_READ, gmfn);
-            if ( vaddr == NULL )
-                continue;
-            memcpy(dump_mem, vaddr, PAGE_SIZE);
-            munmap(vaddr, PAGE_SIZE);
-            dump_mem += PAGE_SIZE;
-            if ( (j + 1) % DUMP_INCREMENT == 0 )
-            {
-                sts = dump_rtn(
-                    xch, args, dump_mem_start, dump_mem - dump_mem_start);
-                if ( sts != 0 )
-                    goto out;
-                dump_mem = dump_mem_start;
-            }
-
-            j++;
-        }
-    }
-
-copy_done:
-    sts = dump_rtn(xch, args, dump_mem_start, dump_mem - dump_mem_start);
-    if ( sts != 0 )
-        goto out;
-    if ( j < nr_pages )
-    {
-        /*
-         * When live dump-mode (-L option) is specified,
-         * the guest domain may have reduced its memory.  Pad with zero pages.
-         */
-        DPRINTF("j (%ld) != nr_pages (%ld)", j, nr_pages);
-        memset(dump_mem_start, 0, PAGE_SIZE);
-        for (; j < nr_pages; j++) {
-            sts = dump_rtn(xch, args, dump_mem_start, PAGE_SIZE);
-            if ( sts != 0 )
-                goto out;
-            if ( !auto_translated_physmap )
-            {
-                p2m_array[j].pfn = XC_CORE_INVALID_PFN;
-                p2m_array[j].gmfn = XC_CORE_INVALID_GMFN;
-            }
-            else
-                pfn_array[j] = XC_CORE_INVALID_PFN;
-        }
-    }
-
-    /* p2m/pfn table: .xen_p2m/.xen_pfn */
-    if ( !auto_translated_physmap )
-        sts = dump_rtn(
-            xch, args, (char *)p2m_array, sizeof(p2m_array[0]) * nr_pages);
-    else
-        sts = dump_rtn(
-            xch, args, (char *)pfn_array, sizeof(pfn_array[0]) * nr_pages);
-    if ( sts != 0 )
-        goto out;
-
-    /* elf section header string table: .shstrtab */
-    sts = dump_rtn(xch, args, strtab->strings, strtab->length);
-    if ( sts != 0 )
-        goto out;
-
-    sts = 0;
-
-out:
-    if ( memory_map != NULL )
-        free(memory_map);
-    if ( p2m != NULL )
-        munmap(p2m, PAGE_SIZE * P2M_FL_ENTRIES);
-    if ( p2m_array != NULL )
-        free(p2m_array);
-    if ( pfn_array != NULL )
-        free(pfn_array);
-    if ( sheaders != NULL )
-        xc_core_shdr_free(sheaders);
-    if ( strtab != NULL )
-        xc_core_strtab_free(strtab);
-    if ( ctxt != NULL )
-        free(ctxt);
-    if ( dump_mem_start != NULL )
-        free(dump_mem_start);
-    if ( live_shinfo != NULL )
-        munmap(live_shinfo, PAGE_SIZE);
-    xc_core_arch_context_free(&arch_ctxt);
-
-    return sts;
-}
-
-/* Callback args for writing to a local dump file. */
-struct dump_args {
-    int     fd;
-};
-
-/* Callback routine for writing to a local dump file. */
-static int local_file_dump(xc_interface *xch,
-                           void *args, char *buffer, unsigned int length)
-{
-    struct dump_args *da = args;
-
-    if ( write_exact(da->fd, buffer, length) == -1 )
-    {
-        PERROR("Failed to write buffer");
-        return -errno;
-    }
-
-    if ( length >= (DUMP_INCREMENT * PAGE_SIZE) )
-    {
-        /*
-         * Now dumping pages -- make sure we discard clean pages from
-         * the cache after each write.
-         */
-        discard_file_cache(xch, da->fd, 0 /* no flush */);
-    }
-
-    return 0;
-}
-
-int
-xc_domain_dumpcore(xc_interface *xch,
-                   uint32_t domid,
-                   const char *corename)
-{
-    struct dump_args da;
-    int sts;
-
-    if ( (da.fd = open(corename, O_CREAT|O_RDWR|O_TRUNC, S_IWUSR|S_IRUSR)) < 0 )
-    {
-        PERROR("Could not open corefile %s", corename);
-        return -errno;
-    }
-
-    sts = xc_domain_dumpcore_via_callback(
-        xch, domid, &da, &local_file_dump);
-
-    /* flush and discard any remaining portion of the file from cache */
-    discard_file_cache(xch, da.fd, 1/* flush first*/);
-
-    close(da.fd);
-
-    return sts;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
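As a quick orientation for the dump-core entry point being moved above, here is a minimal, illustrative caller; the domain id and output path are placeholder values and are not part of this commit.

    /* Illustrative only: dump an ELF core of an (example) domain to a file. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    int main(int argc, char *argv[])
    {
        uint32_t domid = (argc > 1) ? atoi(argv[1]) : 1;   /* example domid */
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if ( !xch )
        {
            perror("xc_interface_open");
            return 1;
        }

        /* Ends up in xc_domain_dumpcore_via_callback() / local_file_dump(). */
        rc = xc_domain_dumpcore(xch, domid, "/tmp/example.core");
        if ( rc )
            fprintf(stderr, "dumpcore failed: %d\n", rc);

        xc_interface_close(xch);
        return rc ? 1 : 0;
    }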
diff --git a/tools/libxc/xc_core.h b/tools/libxc/xc_core.h
deleted file mode 100644 (file)
index 36fb755..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#ifndef XC_CORE_H
-#define XC_CORE_H
-
-#include "xen/version.h"
-#include "xc_private.h"
-#include "xen/libelf/elfstructs.h"
-
-/* section names */
-#define XEN_DUMPCORE_SEC_NOTE                   ".note.Xen"
-#define XEN_DUMPCORE_SEC_PRSTATUS               ".xen_prstatus"
-#define XEN_DUMPCORE_SEC_SHARED_INFO            ".xen_shared_info"
-#define XEN_DUMPCORE_SEC_P2M                    ".xen_p2m"
-#define XEN_DUMPCORE_SEC_PFN                    ".xen_pfn"
-#define XEN_DUMPCORE_SEC_PAGES                  ".xen_pages"
-
-/* elf note name */
-#define XEN_DUMPCORE_ELFNOTE_NAME               "Xen"
-/* note numbers are defined in xen/elfnote.h */
-
-struct elfnote {
-    uint32_t    namesz; /* Elf_Note note; */
-    uint32_t    descsz;
-    uint32_t    type;
-    char        name[4]; /* sizeof("Xen") = 4
-                          * Fortunately this is 64bit aligned so that
-                          * we can use the same structure for both 32/64bit
-                          */
-};
-
-struct xen_dumpcore_elfnote_none_desc {
-    /* nothing */
-};
-
-struct xen_dumpcore_elfnote_header_desc {
-    uint64_t    xch_magic;
-    uint64_t    xch_nr_vcpus;
-    uint64_t    xch_nr_pages;
-    uint64_t    xch_page_size;
-};
-
-struct xen_dumpcore_elfnote_xen_version_desc {
-    uint64_t                    major_version;
-    uint64_t                    minor_version;
-    xen_extraversion_t          extra_version;
-    xen_compile_info_t          compile_info;
-    xen_capabilities_info_t     capabilities;
-    xen_changeset_info_t        changeset;
-    xen_platform_parameters_t   platform_parameters;
-    uint64_t                    pagesize;
-};
-
-#define XEN_DUMPCORE_FORMAT_VERSION(major, minor)  \
-    (((major) << 32) | ((minor) & 0xffffffff))
-#define XEN_DUMPCORE_FORMAT_MAJOR(version)      ((version) >> 32)
-#define XEN_DUMPCORE_FORMAT_MINOR(version)      ((version) & 0xffffffff)
-
-#define XEN_DUMPCORE_FORMAT_MAJOR_CURRENT       ((uint64_t)0)
-#define XEN_DUMPCORE_FORMAT_MINOR_CURRENT       ((uint64_t)1)
-#define XEN_DUMPCORE_FORMAT_VERSION_CURRENT                         \
-    XEN_DUMPCORE_FORMAT_VERSION(XEN_DUMPCORE_FORMAT_MAJOR_CURRENT,  \
-                                XEN_DUMPCORE_FORMAT_MINOR_CURRENT)
-
-struct xen_dumpcore_elfnote_format_version_desc {
-    uint64_t    version;
-};
-
-
-struct xen_dumpcore_elfnote_none {
-    struct elfnote                              elfnote;
-    struct xen_dumpcore_elfnote_none_desc       none;
-};
-
-struct xen_dumpcore_elfnote_header {
-    struct elfnote                              elfnote;
-    struct xen_dumpcore_elfnote_header_desc     header;
-};
-
-struct xen_dumpcore_elfnote_xen_version {
-    struct elfnote                                     elfnote;
-    struct xen_dumpcore_elfnote_xen_version_desc        xen_version;
-};
-
-struct xen_dumpcore_elfnote_format_version {
-    struct elfnote                                      elfnote;
-    struct xen_dumpcore_elfnote_format_version_desc     format_version;
-};
-
-#define XC_CORE_INVALID_PFN     (~(uint64_t)0)
-#define XC_CORE_INVALID_GMFN    (~(uint64_t)0)
-struct xen_dumpcore_p2m {
-    uint64_t    pfn;
-    uint64_t    gmfn;
-};
-
-
-struct xc_core_strtab;
-struct xc_core_section_headers;
-
-Elf64_Shdr*
-xc_core_shdr_get(xc_interface *xch,
-                 struct xc_core_section_headers *sheaders);
-int
-xc_core_shdr_set(xc_interface *xch,
-                 Elf64_Shdr *shdr,
-                 struct xc_core_strtab *strtab,
-                 const char *name, uint32_t type,
-                 uint64_t offset, uint64_t size,
-                 uint64_t addralign, uint64_t entsize);
-
-struct xc_core_memory_map {
-    uint64_t    addr;
-    uint64_t    size;
-};
-typedef struct xc_core_memory_map xc_core_memory_map_t;
-int xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info);
-struct xc_core_arch_context;
-int xc_core_arch_memory_map_get(xc_interface *xch,
-                                struct xc_core_arch_context *arch_ctxt,
-                                xc_dominfo_t *info, shared_info_any_t *live_shinfo,
-                                xc_core_memory_map_t **mapp,
-                                unsigned int *nr_entries);
-int xc_core_arch_map_p2m(xc_interface *xch, unsigned int guest_width,
-                         xc_dominfo_t *info, shared_info_any_t *live_shinfo,
-                         xen_pfn_t **live_p2m, unsigned long *pfnp);
-
-int xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width,
-                                  xc_dominfo_t *info,
-                                  shared_info_any_t *live_shinfo,
-                                  xen_pfn_t **live_p2m, unsigned long *pfnp);
-
-int xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
-                                  xen_pfn_t *gpfn);
-
-
-#if defined (__i386__) || defined (__x86_64__)
-# include "xc_core_x86.h"
-#elif defined (__arm__) || defined(__aarch64__)
-# include "xc_core_arm.h"
-#else
-# error "unsupported architecture"
-#endif
-
-#ifndef ELF_CORE_EFLAGS
-# define ELF_CORE_EFLAGS 0
-#endif
-
-#endif /* XC_CORE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
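The format-version note packs the major number into the high 32 bits and the minor number into the low 32 bits. A self-contained sketch of that packing and unpacking follows; the macros are repeated locally under shortened names so the snippet compiles on its own and nothing in it is part of the commit.

    #include <stdint.h>
    #include <stdio.h>

    /* Local copies mirroring the header's format-version encoding. */
    #define FORMAT_VERSION(major, minor) \
        (((uint64_t)(major) << 32) | ((minor) & 0xffffffffULL))
    #define FORMAT_MAJOR(version)  ((uint64_t)(version) >> 32)
    #define FORMAT_MINOR(version)  ((version) & 0xffffffffULL)

    int main(void)
    {
        uint64_t v = FORMAT_VERSION(0, 1);   /* current dumpcore format: 0.1 */

        printf("packed %#llx -> major %llu, minor %llu\n",
               (unsigned long long)v,
               (unsigned long long)FORMAT_MAJOR(v),
               (unsigned long long)FORMAT_MINOR(v));
        return 0;
    }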
diff --git a/tools/libxc/xc_core_arm.c b/tools/libxc/xc_core_arm.c
deleted file mode 100644 (file)
index 7b587b4..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (c) 2011 Citrix Systems
- *
- */
-
-#include "xc_private.h"
-#include "xc_core.h"
-
-#include <xen-tools/libs.h>
-
-int
-xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
-                              unsigned long pfn)
-{
-    /* TODO: memory from DT */
-    if (pfn >= 0x80000 && pfn < 0x88000)
-        return 1;
-    return 0;
-}
-
-int
-xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
-{
-    return 1;
-}
-
-int
-xc_core_arch_memory_map_get(xc_interface *xch, struct xc_core_arch_context *unused,
-                            xc_dominfo_t *info, shared_info_any_t *live_shinfo,
-                            xc_core_memory_map_t **mapp,
-                            unsigned int *nr_entries)
-{
-    xen_pfn_t p2m_size = 0;
-    xc_core_memory_map_t *map;
-
-    if ( xc_domain_nr_gpfns(xch, info->domid, &p2m_size) < 0 )
-        return -1;
-
-    map = malloc(sizeof(*map));
-    if ( map == NULL )
-    {
-        PERROR("Could not allocate memory");
-        return -1;
-    }
-
-    map->addr = 0;
-    map->size = ((uint64_t)p2m_size) << PAGE_SHIFT;
-
-    *mapp = map;
-    *nr_entries = 1;
-    return 0;
-}
-
-static int
-xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc_dominfo_t *info,
-                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
-                        unsigned long *pfnp, int rw)
-{
-    errno = ENOSYS;
-    return -1;
-}
-
-int
-xc_core_arch_map_p2m(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
-                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
-                        unsigned long *pfnp)
-{
-    struct domain_info_context _dinfo = { .guest_width = guest_width };
-    struct domain_info_context *dinfo = &_dinfo;
-    return xc_core_arch_map_p2m_rw(xch, dinfo, info,
-                                   live_shinfo, live_p2m, pfnp, 0);
-}
-
-int
-xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
-                              shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
-                              unsigned long *pfnp)
-{
-    struct domain_info_context _dinfo = { .guest_width = guest_width };
-    struct domain_info_context *dinfo = &_dinfo;
-    return xc_core_arch_map_p2m_rw(xch, dinfo, info,
-                                   live_shinfo, live_p2m, pfnp, 1);
-}
-
-int
-xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
-                              xen_pfn_t *gpfn)
-{
-    /*
-     * The Grant Table region space is not used until the guest is
-     * booting. Use the first page for the scratch pfn.
-     */
-    BUILD_BUG_ON(GUEST_GNTTAB_SIZE < XC_PAGE_SIZE);
-
-    *gpfn = GUEST_GNTTAB_BASE >> XC_PAGE_SHIFT;
-
-    return 0;
-}
-
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_core_arm.h b/tools/libxc/xc_core_arm.h
deleted file mode 100644 (file)
index 162f7a7..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (c) 2012 Citrix Systems
- *
- */
-
-#ifndef XC_CORE_ARM_H
-#define XC_CORE_ARM_H
-
-#define ELF_ARCH_DATA           ELFDATA2LSB
-#define ELF_ARCH_MACHINE        EM_ARM
-
-struct xc_core_arch_context {
-    /* nothing */
-};
-
-#define xc_core_arch_context_init(arch_ctxt)            do {} while (0)
-#define xc_core_arch_context_free(arch_ctxt)            do {} while (0)
-#define xc_core_arch_context_get(arch_ctxt, ctxt, xch, domid) \
-                                                                (0)
-#define xc_core_arch_context_dump(xch, arch_ctxt, args, dump_rtn)    (0)
-
-int
-xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
-                              unsigned long pfn);
-static inline int
-xc_core_arch_context_get_shdr(xc_interface *xch,
-                              struct xc_core_arch_context *arch_ctxt, 
-                              struct xc_core_section_headers *sheaders,
-                              struct xc_core_strtab *strtab,
-                              uint64_t *filesz, uint64_t offset)
-{
-    *filesz = 0;
-    return 0;
-}
-
-#endif /* XC_CORE_ARM_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_core_x86.c b/tools/libxc/xc_core_x86.c
deleted file mode 100644 (file)
index cb76e62..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- */
-
-#include "xc_private.h"
-#include "xc_core.h"
-#include <xen/hvm/e820.h>
-
-int
-xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
-                              unsigned long pfn)
-{
-    if ((pfn >= 0xa0 && pfn < 0xc0) /* VGA hole */
-        || (pfn >= (HVM_BELOW_4G_MMIO_START >> PAGE_SHIFT)
-            && pfn < (1ULL<<32) >> PAGE_SHIFT)) /* MMIO */
-        return 0;
-    return 1;
-}
-
-int
-xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
-{
-    return info->hvm;
-}
-
-int
-xc_core_arch_memory_map_get(xc_interface *xch, struct xc_core_arch_context *unused,
-                            xc_dominfo_t *info, shared_info_any_t *live_shinfo,
-                            xc_core_memory_map_t **mapp,
-                            unsigned int *nr_entries)
-{
-    xen_pfn_t p2m_size = 0;
-    xc_core_memory_map_t *map;
-
-    if ( xc_domain_nr_gpfns(xch, info->domid, &p2m_size) < 0 )
-        return -1;
-
-    map = malloc(sizeof(*map));
-    if ( map == NULL )
-    {
-        PERROR("Could not allocate memory");
-        return -1;
-    }
-
-    map->addr = 0;
-    map->size = ((uint64_t)p2m_size) << PAGE_SHIFT;
-
-    *mapp = map;
-    *nr_entries = 1;
-    return 0;
-}
-
-static int
-xc_core_arch_map_p2m_rw(xc_interface *xch, struct domain_info_context *dinfo, xc_dominfo_t *info,
-                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
-                        unsigned long *pfnp, int rw)
-{
-    /* Double and single indirect references to the live P2M table */
-    xen_pfn_t *live_p2m_frame_list_list = NULL;
-    xen_pfn_t *live_p2m_frame_list = NULL;
-    /* Copies of the above. */
-    xen_pfn_t *p2m_frame_list_list = NULL;
-    xen_pfn_t *p2m_frame_list = NULL;
-
-    uint32_t dom = info->domid;
-    int ret = -1;
-    int err;
-    int i;
-
-    if ( xc_domain_nr_gpfns(xch, info->domid, &dinfo->p2m_size) < 0 )
-    {
-        ERROR("Could not get maximum GPFN!");
-        goto out;
-    }
-
-    if ( dinfo->p2m_size < info->nr_pages  )
-    {
-        ERROR("p2m_size < nr_pages -1 (%lx < %lx", dinfo->p2m_size, info->nr_pages - 1);
-        goto out;
-    }
-
-    live_p2m_frame_list_list =
-        xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_READ,
-                             GET_FIELD(live_shinfo, arch.pfn_to_mfn_frame_list_list, dinfo->guest_width));
-
-    if ( !live_p2m_frame_list_list )
-    {
-        PERROR("Couldn't map p2m_frame_list_list (errno %d)", errno);
-        goto out;
-    }
-
-    /* Get a local copy of the live_P2M_frame_list_list */
-    if ( !(p2m_frame_list_list = malloc(PAGE_SIZE)) )
-    {
-        ERROR("Couldn't allocate p2m_frame_list_list array");
-        goto out;
-    }
-    memcpy(p2m_frame_list_list, live_p2m_frame_list_list, PAGE_SIZE);
-
-    /* Canonicalize guest's unsigned long vs ours */
-    if ( dinfo->guest_width > sizeof(unsigned long) )
-        for ( i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++ )
-            if ( i < PAGE_SIZE/dinfo->guest_width )
-                p2m_frame_list_list[i] = ((uint64_t *)p2m_frame_list_list)[i];
-            else
-                p2m_frame_list_list[i] = 0;
-    else if ( dinfo->guest_width < sizeof(unsigned long) )
-        for ( i = PAGE_SIZE/sizeof(unsigned long) - 1; i >= 0; i-- )
-            p2m_frame_list_list[i] = ((uint32_t *)p2m_frame_list_list)[i];
-
-    live_p2m_frame_list =
-        xc_map_foreign_pages(xch, dom, PROT_READ,
-                             p2m_frame_list_list,
-                             P2M_FLL_ENTRIES);
-
-    if ( !live_p2m_frame_list )
-    {
-        PERROR("Couldn't map p2m_frame_list");
-        goto out;
-    }
-
-    /* Get a local copy of the live_P2M_frame_list */
-    if ( !(p2m_frame_list = malloc(P2M_TOOLS_FL_SIZE)) )
-    {
-        ERROR("Couldn't allocate p2m_frame_list array");
-        goto out;
-    }
-    memset(p2m_frame_list, 0, P2M_TOOLS_FL_SIZE);
-    memcpy(p2m_frame_list, live_p2m_frame_list, P2M_GUEST_FL_SIZE);
-
-    /* Canonicalize guest's unsigned long vs ours */
-    if ( dinfo->guest_width > sizeof(unsigned long) )
-        for ( i = 0; i < P2M_FL_ENTRIES; i++ )
-            p2m_frame_list[i] = ((uint64_t *)p2m_frame_list)[i];
-    else if ( dinfo->guest_width < sizeof(unsigned long) )
-        for ( i = P2M_FL_ENTRIES - 1; i >= 0; i-- )
-            p2m_frame_list[i] = ((uint32_t *)p2m_frame_list)[i];
-
-    *live_p2m = xc_map_foreign_pages(xch, dom,
-                                    rw ? (PROT_READ | PROT_WRITE) : PROT_READ,
-                                    p2m_frame_list,
-                                    P2M_FL_ENTRIES);
-
-    if ( !*live_p2m )
-    {
-        PERROR("Couldn't map p2m table");
-        goto out;
-    }
-
-    *pfnp = dinfo->p2m_size;
-
-    ret = 0;
-
-out:
-    err = errno;
-
-    if ( live_p2m_frame_list_list )
-        munmap(live_p2m_frame_list_list, PAGE_SIZE);
-
-    if ( live_p2m_frame_list )
-        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
-
-    free(p2m_frame_list_list);
-
-    free(p2m_frame_list);
-
-    errno = err;
-    return ret;
-}
-
-int
-xc_core_arch_map_p2m(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
-                        shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
-                        unsigned long *pfnp)
-{
-    struct domain_info_context _dinfo = { .guest_width = guest_width };
-    struct domain_info_context *dinfo = &_dinfo;
-    return xc_core_arch_map_p2m_rw(xch, dinfo, info,
-                                   live_shinfo, live_p2m, pfnp, 0);
-}
-
-int
-xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width, xc_dominfo_t *info,
-                              shared_info_any_t *live_shinfo, xen_pfn_t **live_p2m,
-                              unsigned long *pfnp)
-{
-    struct domain_info_context _dinfo = { .guest_width = guest_width };
-    struct domain_info_context *dinfo = &_dinfo;
-    return xc_core_arch_map_p2m_rw(xch, dinfo, info,
-                                   live_shinfo, live_p2m, pfnp, 1);
-}
-
-int
-xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
-                              xen_pfn_t *gpfn)
-{
-    return xc_domain_nr_gpfns(xch, domid, gpfn);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
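The canonicalisation loops above convert guest-width entries in place; the widening case must walk the page backwards so an entry's source bytes are not overwritten before they are read. A stand-alone toy version of that trick, with fabricated data rather than a real frame list:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long buf[4];
        uint32_t *guest = (uint32_t *)buf;   /* guest view: 32-bit entries */
        int i;

        for ( i = 0; i < 4; i++ )            /* pretend this was copied from the guest */
            guest[i] = 0x100 + i;

        /* Widen in place, highest index first, so nothing is clobbered early. */
        for ( i = 3; i >= 0; i-- )
            buf[i] = ((uint32_t *)buf)[i];

        for ( i = 0; i < 4; i++ )
            printf("entry %d = %#lx\n", i, buf[i]);
        return 0;
    }

Walking forwards instead would overwrite entry 1's 32-bit source bytes while writing the widened entry 0, which is exactly why the library iterates from the end when the guest is narrower.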
diff --git a/tools/libxc/xc_core_x86.h b/tools/libxc/xc_core_x86.h
deleted file mode 100644 (file)
index 867146b..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
- *                    VA Linux Systems Japan K.K.
- *
- */
-
-#ifndef XC_CORE_X86_H
-#define XC_CORE_X86_H
-
-#define ELF_ARCH_DATA           ELFDATA2LSB
-#define ELF_ARCH_MACHINE       (dinfo->guest_width == 8 ? EM_X86_64 : EM_386)
-
-struct xc_core_arch_context {
-    /* nothing */
-};
-
-#define xc_core_arch_context_init(arch_ctxt)            do {} while (0)
-#define xc_core_arch_context_free(arch_ctxt)            do {} while (0)
-#define xc_core_arch_context_get(arch_ctxt, ctxt, xch, domid) \
-                                                                (0)
-#define xc_core_arch_context_dump(xch, arch_ctxt, args, dump_rtn)    (0)
-
-int
-xc_core_arch_gpfn_may_present(struct xc_core_arch_context *arch_ctxt,
-                              unsigned long pfn);
-static inline int
-xc_core_arch_context_get_shdr(xc_interface *xch,
-                              struct xc_core_arch_context *arch_ctxt, 
-                              struct xc_core_section_headers *sheaders,
-                              struct xc_core_strtab *strtab,
-                              uint64_t *filesz, uint64_t offset)
-{
-    *filesz = 0;
-    return 0;
-}
-
-#endif /* XC_CORE_X86_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_cpu_hotplug.c b/tools/libxc/xc_cpu_hotplug.c
deleted file mode 100644 (file)
index 2ea9825..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/******************************************************************************
- * xc_cpu_hotplug.c - Libxc API for Xen Physical CPU hotplug Management
- *
- * Copyright (c) 2008, Shan Haitao <haitao.shan@intel.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include "xc_private.h"
-
-int xc_cpu_online(xc_interface *xch, int cpu)
-{
-    DECLARE_SYSCTL;
-    int ret;
-
-    sysctl.cmd = XEN_SYSCTL_cpu_hotplug;
-    sysctl.u.cpu_hotplug.cpu = cpu;
-    sysctl.u.cpu_hotplug.op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
-    ret = xc_sysctl(xch, &sysctl);
-
-    return ret;
-}
-
-int xc_cpu_offline(xc_interface *xch, int cpu)
-{
-    DECLARE_SYSCTL;
-    int ret;
-
-    sysctl.cmd = XEN_SYSCTL_cpu_hotplug;
-    sysctl.u.cpu_hotplug.cpu = cpu;
-    sysctl.u.cpu_hotplug.op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
-    ret = xc_sysctl(xch, &sysctl);
-
-    return ret;
-}
-
-int xc_smt_enable(xc_interface *xch)
-{
-    DECLARE_SYSCTL;
-    int ret;
-
-    sysctl.cmd = XEN_SYSCTL_cpu_hotplug;
-    sysctl.u.cpu_hotplug.cpu = 0;
-    sysctl.u.cpu_hotplug.op = XEN_SYSCTL_CPU_HOTPLUG_SMT_ENABLE;
-    ret = xc_sysctl(xch, &sysctl);
-
-    return ret;
-}
-
-int xc_smt_disable(xc_interface *xch)
-{
-    DECLARE_SYSCTL;
-    int ret;
-
-    sysctl.cmd = XEN_SYSCTL_cpu_hotplug;
-    sysctl.u.cpu_hotplug.cpu = 0;
-    sysctl.u.cpu_hotplug.op = XEN_SYSCTL_CPU_HOTPLUG_SMT_DISABLE;
-    ret = xc_sysctl(xch, &sysctl);
-
-    return ret;
-}
-
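For orientation, a minimal sketch of driving the hotplug calls above from a standalone tool; the CPU number is a placeholder and error handling is reduced to a message.

    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        int cpu = 3;                              /* example CPU number */
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);

        if ( !xch )
            return 1;

        if ( xc_cpu_offline(xch, cpu) )
            fprintf(stderr, "offlining CPU %d failed\n", cpu);
        else if ( xc_cpu_online(xch, cpu) )
            fprintf(stderr, "re-onlining CPU %d failed\n", cpu);

        xc_interface_close(xch);
        return 0;
    }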
diff --git a/tools/libxc/xc_cpupool.c b/tools/libxc/xc_cpupool.c
deleted file mode 100644 (file)
index fbd8cc9..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-/******************************************************************************
- * xc_cpupool.c
- *
- * API for manipulating and obtaining information on cpupools.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (c) 2009, J Gross.
- */
-
-#include <stdarg.h>
-#include <unistd.h>
-#include "xc_private.h"
-
-static int do_sysctl_save(xc_interface *xch, struct xen_sysctl *sysctl)
-{
-    int ret;
-
-    do {
-        ret = do_sysctl(xch, sysctl);
-    } while ( (ret < 0) && (errno == EAGAIN) );
-
-    return ret;
-}
-
-int xc_cpupool_create(xc_interface *xch,
-                      uint32_t *ppoolid,
-                      uint32_t sched_id)
-{
-    int err;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_cpupool_op;
-    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_CREATE;
-    sysctl.u.cpupool_op.cpupool_id = (*ppoolid == XC_CPUPOOL_POOLID_ANY) ?
-        XEN_SYSCTL_CPUPOOL_PAR_ANY : *ppoolid;
-    sysctl.u.cpupool_op.sched_id = sched_id;
-    if ( (err = do_sysctl_save(xch, &sysctl)) != 0 )
-        return err;
-
-    *ppoolid = sysctl.u.cpupool_op.cpupool_id;
-    return 0;
-}
-
-int xc_cpupool_destroy(xc_interface *xch,
-                       uint32_t poolid)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_cpupool_op;
-    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_DESTROY;
-    sysctl.u.cpupool_op.cpupool_id = poolid;
-    return do_sysctl_save(xch, &sysctl);
-}
-
-xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch, 
-                       uint32_t poolid)
-{
-    int err = 0;
-    xc_cpupoolinfo_t *info = NULL;
-    int local_size;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
-
-    local_size = xc_get_cpumap_size(xch);
-    if (local_size <= 0)
-    {
-        PERROR("Could not get number of cpus");
-        return NULL;
-    }
-
-    local = xc_hypercall_buffer_alloc(xch, local, local_size);
-    if ( local == NULL ) {
-        PERROR("Could not allocate locked memory for xc_cpupool_getinfo");
-        return NULL;
-    }
-
-    sysctl.cmd = XEN_SYSCTL_cpupool_op;
-    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-    sysctl.u.cpupool_op.cpupool_id = poolid;
-    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_bits = local_size * 8;
-
-    err = do_sysctl_save(xch, &sysctl);
-
-    if ( err < 0 )
-       goto out;
-
-    info = calloc(1, sizeof(xc_cpupoolinfo_t));
-    if ( !info )
-       goto out;
-
-    info->cpumap = xc_cpumap_alloc(xch);
-    if (!info->cpumap) {
-        free(info);
-        info = NULL;
-        goto out;
-    }
-    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-    info->sched_id = sysctl.u.cpupool_op.sched_id;
-    info->n_dom = sysctl.u.cpupool_op.n_dom;
-    memcpy(info->cpumap, local, local_size);
-
-out:
-    xc_hypercall_buffer_free(xch, local);
-
-    return info;
-}
-
-void xc_cpupool_infofree(xc_interface *xch,
-                         xc_cpupoolinfo_t *info)
-{
-    free(info->cpumap);
-    free(info);
-}
-
-int xc_cpupool_addcpu(xc_interface *xch,
-                      uint32_t poolid,
-                      int cpu)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_cpupool_op;
-    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_ADDCPU;
-    sysctl.u.cpupool_op.cpupool_id = poolid;
-    sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
-    return do_sysctl_save(xch, &sysctl);
-}
-
-/*
- * The hypervisor might return EADDRINUSE when trying to remove a cpu from a
- * cpupool when a domain running in this cpupool has pinned a vcpu
- * temporarily. Retry a few times in this case, as the situation may
- * resolve itself.
- */
-#define NUM_RMCPU_BUSY_RETRIES 5
-
-int xc_cpupool_removecpu(xc_interface *xch,
-                         uint32_t poolid,
-                         int cpu)
-{
-    unsigned retries;
-    int err = 0;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_cpupool_op;
-    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_RMCPU;
-    sysctl.u.cpupool_op.cpupool_id = poolid;
-    sysctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_SYSCTL_CPUPOOL_PAR_ANY : cpu;
-    for ( retries = 0; retries < NUM_RMCPU_BUSY_RETRIES; retries++ ) {
-        err = do_sysctl_save(xch, &sysctl);
-        if ( err == 0 || errno != EADDRINUSE )
-            break;
-    }
-    return err;
-}
-
-int xc_cpupool_movedomain(xc_interface *xch,
-                          uint32_t poolid,
-                          uint32_t domid)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_cpupool_op;
-    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN;
-    sysctl.u.cpupool_op.cpupool_id = poolid;
-    sysctl.u.cpupool_op.domid = domid;
-    return do_sysctl_save(xch, &sysctl);
-}
-
-xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch)
-{
-    int err = -1;
-    xc_cpumap_t cpumap = NULL;
-    int mapsize;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
-
-    mapsize = xc_get_cpumap_size(xch);
-    if (mapsize <= 0)
-        return NULL;
-
-    local = xc_hypercall_buffer_alloc(xch, local, mapsize);
-    if ( local == NULL ) {
-        PERROR("Could not allocate locked memory for xc_cpupool_freeinfo");
-        return NULL;
-    }
-
-    sysctl.cmd = XEN_SYSCTL_cpupool_op;
-    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
-    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_bits = mapsize * 8;
-
-    err = do_sysctl_save(xch, &sysctl);
-
-    if ( err < 0 )
-        goto out;
-
-    cpumap = xc_cpumap_alloc(xch);
-    if (cpumap == NULL)
-        goto out;
-
-    memcpy(cpumap, local, mapsize);
-
-out:
-    xc_hypercall_buffer_free(xch, local);
-    return cpumap;
-}
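A sketch of a typical cpupool lifecycle built from the calls above; the scheduler and CPU choices are placeholders (XEN_SCHEDULER_CREDIT comes from the public domctl interface), and a pool can only be destroyed once it is empty again.

    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint32_t poolid = XC_CPUPOOL_POOLID_ANY;   /* let Xen pick an id */
        xc_cpupoolinfo_t *info;

        if ( !xch )
            return 1;

        if ( xc_cpupool_create(xch, &poolid, XEN_SCHEDULER_CREDIT) ||
             xc_cpupool_addcpu(xch, poolid, -1 /* any free CPU */) )
        {
            fprintf(stderr, "cpupool setup failed\n");
            xc_interface_close(xch);
            return 1;
        }

        info = xc_cpupool_getinfo(xch, poolid);
        if ( info )
        {
            printf("pool %u: sched %u, %u domain(s)\n",
                   info->cpupool_id, info->sched_id, info->n_dom);
            xc_cpupool_infofree(xch, info);
        }

        /* Tear down again: remove the CPU, then destroy the empty pool. */
        xc_cpupool_removecpu(xch, poolid, -1);
        xc_cpupool_destroy(xch, poolid);
        xc_interface_close(xch);
        return 0;
    }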
diff --git a/tools/libxc/xc_csched.c b/tools/libxc/xc_csched.c
deleted file mode 100644 (file)
index 8e8c672..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/****************************************************************************
- * (C) 2006 - Emmanuel Ackaouy - XenSource Inc.
- ****************************************************************************
- *
- *        File: xc_csched.c
- *      Author: Emmanuel Ackaouy
- *
- * Description: XC Interface to the credit scheduler
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-int
-xc_sched_credit_domain_set(
-    xc_interface *xch,
-    uint32_t domid,
-    struct xen_domctl_sched_credit *sdom)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_scheduler_op;
-    domctl.domain = domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
-    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
-    domctl.u.scheduler_op.u.credit = *sdom;
-
-    if ( do_domctl(xch, &domctl) )
-        return -1;
-
-    return 0;
-}
-
-int
-xc_sched_credit_domain_get(
-    xc_interface *xch,
-    uint32_t domid,
-    struct xen_domctl_sched_credit *sdom)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_scheduler_op;
-    domctl.domain = domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
-    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
-
-    if ( do_domctl(xch, &domctl) )
-        return -1;
-
-    *sdom = domctl.u.scheduler_op.u.credit;
-
-    return 0;
-}
-
-int
-xc_sched_credit_params_set(
-    xc_interface *xch,
-    uint32_t cpupool_id,
-    struct xen_sysctl_credit_schedule *schedule)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_scheduler_op;
-    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
-    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
-    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
-
-    sysctl.u.scheduler_op.u.sched_credit = *schedule;
-
-    if ( do_sysctl(xch, &sysctl) )
-        return -1;
-
-    *schedule = sysctl.u.scheduler_op.u.sched_credit;
-
-    return 0;
-}
-
-int
-xc_sched_credit_params_get(
-    xc_interface *xch,
-    uint32_t cpupool_id,
-    struct xen_sysctl_credit_schedule *schedule)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_scheduler_op;
-    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
-    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
-    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_getinfo;
-
-    if ( do_sysctl(xch, &sysctl) )
-        return -1;
-
-    *schedule = sysctl.u.scheduler_op.u.sched_credit;
-
-    return 0;
-}
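A sketch of the usual read-modify-write pattern for the credit scheduler helpers above; the domain id and new weight are placeholder values.

    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        uint32_t domid = 1;                        /* example domain */
        struct xen_domctl_sched_credit sdom;
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);

        if ( !xch )
            return 1;

        if ( xc_sched_credit_domain_get(xch, domid, &sdom) == 0 )
        {
            printf("weight %u cap %u\n", sdom.weight, sdom.cap);
            sdom.weight = 512;                     /* example new weight */
            if ( xc_sched_credit_domain_set(xch, domid, &sdom) )
                fprintf(stderr, "failed to set credit parameters\n");
        }

        xc_interface_close(xch);
        return 0;
    }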
diff --git a/tools/libxc/xc_csched2.c b/tools/libxc/xc_csched2.c
deleted file mode 100644 (file)
index 5eb753a..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/****************************************************************************
- * (C) 2006 - Emmanuel Ackaouy - XenSource Inc.
- ****************************************************************************
- *
- *        File: xc_csched2.c
- *      Author: Emmanuel Ackaouy
- *
- * Description: XC Interface to the credit2 scheduler
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-int
-xc_sched_credit2_domain_set(
-    xc_interface *xch,
-    uint32_t domid,
-    struct xen_domctl_sched_credit2 *sdom)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_scheduler_op;
-    domctl.domain = domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
-    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
-    domctl.u.scheduler_op.u.credit2 = *sdom;
-
-    if ( do_domctl(xch, &domctl) )
-        return -1;
-
-    return 0;
-}
-
-int
-xc_sched_credit2_domain_get(
-    xc_interface *xch,
-    uint32_t domid,
-    struct xen_domctl_sched_credit2 *sdom)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_scheduler_op;
-    domctl.domain = domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
-    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
-
-    if ( do_domctl(xch, &domctl) )
-        return -1;
-
-    *sdom = domctl.u.scheduler_op.u.credit2;
-
-    return 0;
-}
-
-int
-xc_sched_credit2_params_set(
-    xc_interface *xch,
-    uint32_t cpupool_id,
-    struct xen_sysctl_credit2_schedule *schedule)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_scheduler_op;
-    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
-    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
-    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
-
-    sysctl.u.scheduler_op.u.sched_credit2 = *schedule;
-
-    if ( do_sysctl(xch, &sysctl) )
-        return -1;
-
-    *schedule = sysctl.u.scheduler_op.u.sched_credit2;
-
-    return 0;
-}
-
-int
-xc_sched_credit2_params_get(
-    xc_interface *xch,
-    uint32_t cpupool_id,
-    struct xen_sysctl_credit2_schedule *schedule)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_scheduler_op;
-    sysctl.u.scheduler_op.cpupool_id = cpupool_id;
-    sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
-    sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_getinfo;
-
-    if ( do_sysctl(xch, &sysctl) )
-        return -1;
-
-    *schedule = sysctl.u.scheduler_op.u.sched_credit2;
-
-    return 0;
-}
diff --git a/tools/libxc/xc_devicemodel_compat.c b/tools/libxc/xc_devicemodel_compat.c
deleted file mode 100644 (file)
index a46011c..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Compat shims for use of 3rd party consumers of libxenctrl device model
- * functionality which has been split into separate libraries.
- */
-
-#define XC_WANT_COMPAT_DEVICEMODEL_API
-#include "xc_private.h"
-
-int xc_hvm_create_ioreq_server(
-    xc_interface *xch, uint32_t domid, int handle_bufioreq,
-    ioservid_t *id)
-{
-    return xendevicemodel_create_ioreq_server(xch->dmod, domid,
-                                              handle_bufioreq, id);
-}
-
-int xc_hvm_get_ioreq_server_info(
-    xc_interface *xch, uint32_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
-    xen_pfn_t *bufioreq_pfn, evtchn_port_t *bufioreq_port)
-{
-    return xendevicemodel_get_ioreq_server_info(xch->dmod, domid, id,
-                                                ioreq_pfn, bufioreq_pfn,
-                                                bufioreq_port);
-}
-
-int xc_hvm_map_io_range_to_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
-    uint64_t start, uint64_t end)
-{
-    return xendevicemodel_map_io_range_to_ioreq_server(xch->dmod, domid,
-                                                       id, is_mmio, start,
-                                                       end);
-}
-
-int xc_hvm_unmap_io_range_from_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
-    uint64_t start, uint64_t end)
-{
-    return xendevicemodel_unmap_io_range_from_ioreq_server(xch->dmod, domid,
-                                                           id, is_mmio,
-                                                           start, end);
-}
-
-int xc_hvm_map_pcidev_to_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
-    uint8_t bus, uint8_t device, uint8_t function)
-{
-    return xendevicemodel_map_pcidev_to_ioreq_server(xch->dmod, domid, id,
-                                                     segment, bus, device,
-                                                     function);
-}
-
-int xc_hvm_unmap_pcidev_from_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
-    uint8_t bus, uint8_t device, uint8_t function)
-{
-    return xendevicemodel_unmap_pcidev_from_ioreq_server(xch->dmod, domid,
-                                                         id, segment, bus,
-                                                         device, function);
-}
-
-int xc_hvm_destroy_ioreq_server(
-    xc_interface *xch, uint32_t domid, ioservid_t id)
-{
-    return xendevicemodel_destroy_ioreq_server(xch->dmod, domid, id);
-}
-
-int xc_hvm_set_ioreq_server_state(
-    xc_interface *xch, uint32_t domid, ioservid_t id, int enabled)
-{
-    return xendevicemodel_set_ioreq_server_state(xch->dmod, domid, id,
-                                                 enabled);
-}
-
-int xc_hvm_set_pci_intx_level(
-    xc_interface *xch, uint32_t domid, uint16_t segment, uint8_t bus,
-    uint8_t device, uint8_t intx, unsigned int level)
-{
-    return xendevicemodel_set_pci_intx_level(xch->dmod, domid, segment,
-                                             bus, device, intx, level);
-}
-
-int xc_hvm_set_isa_irq_level(
-    xc_interface *xch, uint32_t domid, uint8_t irq, unsigned int level)
-{
-    return xendevicemodel_set_isa_irq_level(xch->dmod, domid, irq, level);
-}
-
-int xc_hvm_set_pci_link_route(
-    xc_interface *xch, uint32_t domid, uint8_t link, uint8_t irq)
-{
-    return xendevicemodel_set_pci_link_route(xch->dmod, domid, link, irq);
-}
-
-int xc_hvm_inject_msi(
-    xc_interface *xch, uint32_t domid, uint64_t msi_addr, uint32_t msi_data)
-{
-    return xendevicemodel_inject_msi(xch->dmod, domid, msi_addr, msi_data);
-}
-
-int xc_hvm_track_dirty_vram(
-    xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr,
-    unsigned long *dirty_bitmap)
-{
-    return xendevicemodel_track_dirty_vram(xch->dmod, domid, first_pfn,
-                                           nr, dirty_bitmap);
-}
-
-int xc_hvm_modified_memory(
-    xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr)
-{
-    return xendevicemodel_modified_memory(xch->dmod, domid, first_pfn, nr);
-}
-
-int xc_hvm_set_mem_type(
-    xc_interface *xch, uint32_t domid, hvmmem_type_t type,
-    uint64_t first_pfn, uint32_t nr)
-{
-    return xendevicemodel_set_mem_type(xch->dmod, domid, type, first_pfn,
-                                       nr);
-}
-
-int xc_hvm_inject_trap(
-    xc_interface *xch, uint32_t domid, int vcpu, uint8_t vector,
-    uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2)
-{
-    return xendevicemodel_inject_event(xch->dmod, domid, vcpu, vector,
-                                       type, error_code, insn_len, cr2);
-}
-
-int xc_domain_pin_memory_cacheattr(
-    xc_interface *xch, uint32_t domid, uint64_t start, uint64_t end,
-    uint32_t type)
-{
-    return xendevicemodel_pin_memory_cacheattr(xch->dmod, domid, start, end,
-                                               type);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
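A sketch of how a legacy consumer is expected to reach these shims: defining XC_WANT_COMPAT_DEVICEMODEL_API before including xenctrl.h keeps the old entry points visible, while new code should call libxendevicemodel directly. The domain and IRQ numbers below are placeholders.

    /* Legacy consumer opting in to the compat device model API (sketch). */
    #define XC_WANT_COMPAT_DEVICEMODEL_API
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);

        if ( !xch )
            return 1;

        /* Assert then deassert ISA IRQ 10 of (example) domain 1. */
        xc_hvm_set_isa_irq_level(xch, 1, 10, 1);
        xc_hvm_set_isa_irq_level(xch, 1, 10, 0);

        xc_interface_close(xch);
        return 0;
    }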
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
deleted file mode 100644 (file)
index 43fab50..0000000
+++ /dev/null
@@ -1,2205 +0,0 @@
-/******************************************************************************
- * xc_domain.c
- *
- * API for manipulating and obtaining information on domains.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (c) 2003, K A Fraser.
- */
-
-#include "xc_private.h"
-#include "xc_core.h"
-#include "xc_private.h"
-#include <xen/memory.h>
-#include <xen/hvm/hvm_op.h>
-
-int xc_domain_create(xc_interface *xch, uint32_t *pdomid,
-                     struct xen_domctl_createdomain *config)
-{
-    int err;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_createdomain;
-    domctl.domain = *pdomid;
-    domctl.u.createdomain = *config;
-
-    if ( (err = do_domctl(xch, &domctl)) != 0 )
-        return err;
-
-    *pdomid = (uint16_t)domctl.domain;
-    *config = domctl.u.createdomain;
-
-    return 0;
-}
-
-int xc_domain_cacheflush(xc_interface *xch, uint32_t domid,
-                         xen_pfn_t start_pfn, xen_pfn_t nr_pfns)
-{
-#if defined (__i386__) || defined (__x86_64__)
-    /*
-     * The x86 architecture provides cache coherency guarantees which prevent
-     * the need for this hypercall.  Avoid the overhead of making a hypercall
-     * just for Xen to return -ENOSYS.  It is safe to ignore this call on x86
-     * so we just return 0.
-     */
-    return 0;
-#else
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_cacheflush;
-    domctl.domain = domid;
-    domctl.u.cacheflush.start_pfn = start_pfn;
-    domctl.u.cacheflush.nr_pfns = nr_pfns;
-    return do_domctl(xch, &domctl);
-#endif
-}
-
-int xc_domain_pause(xc_interface *xch,
-                    uint32_t domid)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_pausedomain;
-    domctl.domain = domid;
-    return do_domctl(xch, &domctl);
-}
-
-
-int xc_domain_unpause(xc_interface *xch,
-                      uint32_t domid)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_unpausedomain;
-    domctl.domain = domid;
-    return do_domctl(xch, &domctl);
-}
-
-
-int xc_domain_destroy(xc_interface *xch,
-                      uint32_t domid)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_destroydomain;
-    domctl.domain = domid;
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_shutdown(xc_interface *xch,
-                       uint32_t domid,
-                       int reason)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BUFFER(sched_remote_shutdown_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_domain_shutdown hypercall");
-        goto out1;
-    }
-
-    arg->domain_id = domid;
-    arg->reason = reason;
-    ret = xencall2(xch->xcall, __HYPERVISOR_sched_op,
-                   SCHEDOP_remote_shutdown,
-                   HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
- out1:
-    return ret;
-}
-
-
-int xc_domain_node_setaffinity(xc_interface *xch,
-                               uint32_t domid,
-                               xc_nodemap_t nodemap)
-{
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
-    int ret = -1;
-    int nodesize;
-
-    nodesize = xc_get_nodemap_size(xch);
-    if (nodesize <= 0)
-    {
-        PERROR("Could not get number of nodes");
-        goto out;
-    }
-
-    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
-    if ( local == NULL )
-    {
-        PERROR("Could not allocate memory for setnodeaffinity domctl hypercall");
-        goto out;
-    }
-
-    domctl.cmd = XEN_DOMCTL_setnodeaffinity;
-    domctl.domain = domid;
-
-    memcpy(local, nodemap, nodesize);
-    set_xen_guest_handle(domctl.u.nodeaffinity.nodemap.bitmap, local);
-    domctl.u.nodeaffinity.nodemap.nr_bits = nodesize * 8;
-
-    ret = do_domctl(xch, &domctl);
-
-    xc_hypercall_buffer_free(xch, local);
-
- out:
-    return ret;
-}
-
-int xc_domain_node_getaffinity(xc_interface *xch,
-                               uint32_t domid,
-                               xc_nodemap_t nodemap)
-{
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
-    int ret = -1;
-    int nodesize;
-
-    nodesize = xc_get_nodemap_size(xch);
-    if (nodesize <= 0)
-    {
-        PERROR("Could not get number of nodes");
-        goto out;
-    }
-
-    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
-    if ( local == NULL )
-    {
-        PERROR("Could not allocate memory for getnodeaffinity domctl hypercall");
-        goto out;
-    }
-
-    domctl.cmd = XEN_DOMCTL_getnodeaffinity;
-    domctl.domain = domid;
-
-    set_xen_guest_handle(domctl.u.nodeaffinity.nodemap.bitmap, local);
-    domctl.u.nodeaffinity.nodemap.nr_bits = nodesize * 8;
-
-    ret = do_domctl(xch, &domctl);
-
-    memcpy(nodemap, local, nodesize);
-
-    xc_hypercall_buffer_free(xch, local);
-
- out:
-    return ret;
-}
-
-int xc_vcpu_setaffinity(xc_interface *xch,
-                        uint32_t domid,
-                        int vcpu,
-                        xc_cpumap_t cpumap_hard_inout,
-                        xc_cpumap_t cpumap_soft_inout,
-                        uint32_t flags)
-{
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(cpumap_hard_inout, 0,
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    DECLARE_HYPERCALL_BOUNCE(cpumap_soft_inout, 0,
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    int ret = -1;
-    int cpusize;
-
-    cpusize = xc_get_cpumap_size(xch);
-    if (cpusize <= 0)
-    {
-        PERROR("Could not get number of cpus");
-        return -1;
-    }
-
-    HYPERCALL_BOUNCE_SET_SIZE(cpumap_hard_inout, cpusize);
-    HYPERCALL_BOUNCE_SET_SIZE(cpumap_soft_inout, cpusize);
-
-    if ( xc_hypercall_bounce_pre(xch, cpumap_hard_inout) ||
-         xc_hypercall_bounce_pre(xch, cpumap_soft_inout) )
-    {
-        PERROR("Could not allocate hcall buffers for DOMCTL_setvcpuaffinity");
-        goto out;
-    }
-
-    domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
-    domctl.domain = domid;
-    domctl.u.vcpuaffinity.vcpu = vcpu;
-    domctl.u.vcpuaffinity.flags = flags;
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap,
-                         cpumap_hard_inout);
-    domctl.u.vcpuaffinity.cpumap_hard.nr_bits = cpusize * 8;
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_soft.bitmap,
-                         cpumap_soft_inout);
-    domctl.u.vcpuaffinity.cpumap_soft.nr_bits = cpusize * 8;
-
-    ret = do_domctl(xch, &domctl);
-
- out:
-    xc_hypercall_bounce_post(xch, cpumap_hard_inout);
-    xc_hypercall_bounce_post(xch, cpumap_soft_inout);
-
-    return ret;
-}
-
-
-int xc_vcpu_getaffinity(xc_interface *xch,
-                        uint32_t domid,
-                        int vcpu,
-                        xc_cpumap_t cpumap_hard,
-                        xc_cpumap_t cpumap_soft,
-                        uint32_t flags)
-{
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(cpumap_hard, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BOUNCE(cpumap_soft, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    int ret = -1;
-    int cpusize;
-
-    cpusize = xc_get_cpumap_size(xch);
-    if (cpusize <= 0)
-    {
-        PERROR("Could not get number of cpus");
-        return -1;
-    }
-
-    HYPERCALL_BOUNCE_SET_SIZE(cpumap_hard, cpusize);
-    HYPERCALL_BOUNCE_SET_SIZE(cpumap_soft, cpusize);
-
-    if ( xc_hypercall_bounce_pre(xch, cpumap_hard) ||
-         xc_hypercall_bounce_pre(xch, cpumap_soft) )
-    {
-        PERROR("Could not allocate hcall buffers for DOMCTL_getvcpuaffinity");
-        goto out;
-    }
-
-    domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
-    domctl.domain = domid;
-    domctl.u.vcpuaffinity.vcpu = vcpu;
-    domctl.u.vcpuaffinity.flags = flags;
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_hard.bitmap,
-                         cpumap_hard);
-    domctl.u.vcpuaffinity.cpumap_hard.nr_bits = cpusize * 8;
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap_soft.bitmap,
-                         cpumap_soft);
-    domctl.u.vcpuaffinity.cpumap_soft.nr_bits = cpusize * 8;
-
-    ret = do_domctl(xch, &domctl);
-
- out:
-    xc_hypercall_bounce_post(xch, cpumap_hard);
-    xc_hypercall_bounce_post(xch, cpumap_soft);
-
-    return ret;
-}
-
-int xc_domain_get_guest_width(xc_interface *xch, uint32_t domid,
-                              unsigned int *guest_width)
-{
-    DECLARE_DOMCTL;
-
-    memset(&domctl, 0, sizeof(domctl));
-    domctl.domain = domid;
-    domctl.cmd = XEN_DOMCTL_get_address_size;
-
-    if ( do_domctl(xch, &domctl) != 0 )
-        return 1;
-
-    /* We want the result in bytes */
-    *guest_width = domctl.u.address_size.size / 8;
-    return 0;
-}
-
-int xc_dom_vuart_init(xc_interface *xch,
-                      uint32_t type,
-                      uint32_t domid,
-                      uint32_t console_domid,
-                      xen_pfn_t gfn,
-                      evtchn_port_t *evtchn)
-{
-    DECLARE_DOMCTL;
-    int rc = 0;
-
-    memset(&domctl, 0, sizeof(domctl));
-
-    domctl.cmd = XEN_DOMCTL_vuart_op;
-    domctl.domain = domid;
-    domctl.u.vuart_op.cmd = XEN_DOMCTL_VUART_OP_INIT;
-    domctl.u.vuart_op.type = type;
-    domctl.u.vuart_op.console_domid = console_domid;
-    domctl.u.vuart_op.gfn = gfn;
-
-    if ( (rc = do_domctl(xch, &domctl)) < 0 )
-        return rc;
-
-    *evtchn = domctl.u.vuart_op.evtchn;
-
-    return rc;
-}
-
-int xc_domain_getinfo(xc_interface *xch,
-                      uint32_t first_domid,
-                      unsigned int max_doms,
-                      xc_dominfo_t *info)
-{
-    unsigned int nr_doms;
-    uint32_t next_domid = first_domid;
-    DECLARE_DOMCTL;
-    int rc = 0;
-
-    memset(info, 0, max_doms*sizeof(xc_dominfo_t));
-
-    for ( nr_doms = 0; nr_doms < max_doms; nr_doms++ )
-    {
-        domctl.cmd = XEN_DOMCTL_getdomaininfo;
-        domctl.domain = next_domid;
-        if ( (rc = do_domctl(xch, &domctl)) < 0 )
-            break;
-        info->domid      = domctl.domain;
-
-        info->dying    = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_dying);
-        info->shutdown = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_shutdown);
-        info->paused   = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_paused);
-        info->blocked  = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_blocked);
-        info->running  = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_running);
-        info->hvm      = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
-        info->debugged = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_debugged);
-        info->xenstore = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_xs_domain);
-        info->hap      = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hap);
-
-        info->shutdown_reason =
-            (domctl.u.getdomaininfo.flags>>XEN_DOMINF_shutdownshift) &
-            XEN_DOMINF_shutdownmask;
-
-        if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_crash) )
-        {
-            info->shutdown = 0;
-            info->crashed  = 1;
-        }
-
-        info->ssidref  = domctl.u.getdomaininfo.ssidref;
-        info->nr_pages = domctl.u.getdomaininfo.tot_pages;
-        info->nr_outstanding_pages = domctl.u.getdomaininfo.outstanding_pages;
-        info->nr_shared_pages = domctl.u.getdomaininfo.shr_pages;
-        info->nr_paged_pages = domctl.u.getdomaininfo.paged_pages;
-        info->max_memkb = domctl.u.getdomaininfo.max_pages << (PAGE_SHIFT-10);
-        info->shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
-        info->cpu_time = domctl.u.getdomaininfo.cpu_time;
-        info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
-        info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
-        info->cpupool = domctl.u.getdomaininfo.cpupool;
-        info->arch_config = domctl.u.getdomaininfo.arch_config;
-
-        memcpy(info->handle, domctl.u.getdomaininfo.handle,
-               sizeof(xen_domain_handle_t));
-
-        next_domid = (uint16_t)domctl.domain + 1;
-        info++;
-    }
-
-    if ( nr_doms == 0 )
-        return rc;
-
-    return nr_doms;
-}
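As a purely illustrative sketch (not part of the moved file): a caller might enumerate all domains in batches with xc_domain_getinfo(), assuming a valid xc_interface handle xch and the usual stdio includes.

    xc_dominfo_t info[16];
    uint32_t first = 0;
    int n;

    /* Each call fills up to 16 entries and returns how many were fetched. */
    while ( (n = xc_domain_getinfo(xch, first, 16, info)) > 0 )
    {
        for ( int i = 0; i < n; i++ )
            printf("dom %u: %lu pages\n", info[i].domid, info[i].nr_pages);

        /* Resume the scan just after the last domain in this batch. */
        first = info[n - 1].domid + 1;
    }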
-
-int xc_domain_getinfolist(xc_interface *xch,
-                          uint32_t first_domain,
-                          unsigned int max_domains,
-                          xc_domaininfo_t *info)
-{
-    int ret = 0;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(info, max_domains*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, info) )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
-    sysctl.u.getdomaininfolist.first_domain = first_domain;
-    sysctl.u.getdomaininfolist.max_domains  = max_domains;
-    set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);
-
-    if ( xc_sysctl(xch, &sysctl) < 0 )
-        ret = -1;
-    else
-        ret = sysctl.u.getdomaininfolist.num_domains;
-
-    xc_hypercall_bounce_post(xch, info);
-
-    return ret;
-}
-
-/* set broken page p2m */
-int xc_set_broken_page_p2m(xc_interface *xch,
-                           uint32_t domid,
-                           unsigned long pfn)
-{
-    int ret;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_set_broken_page_p2m;
-    domctl.domain = domid;
-    domctl.u.set_broken_page_p2m.pfn = pfn;
-    ret = do_domctl(xch, &domctl);
-
-    return ret ? -1 : 0;
-}
-
-/* get info from hvm guest for save */
-int xc_domain_hvm_getcontext(xc_interface *xch,
-                             uint32_t domid,
-                             uint8_t *ctxt_buf,
-                             uint32_t size)
-{
-    int ret;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_gethvmcontext;
-    domctl.domain = domid;
-    domctl.u.hvmcontext.size = size;
-    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
-
-    ret = do_domctl(xch, &domctl);
-
-    xc_hypercall_bounce_post(xch, ctxt_buf);
-
-    return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
-}
-
-/* Get just one element of the HVM guest context.
- * size must be >= HVM_SAVE_LENGTH(type) */
-int xc_domain_hvm_getcontext_partial(xc_interface *xch,
-                                     uint32_t domid,
-                                     uint16_t typecode,
-                                     uint16_t instance,
-                                     void *ctxt_buf,
-                                     uint32_t size)
-{
-    int ret;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( !ctxt_buf || xc_hypercall_bounce_pre(xch, ctxt_buf) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_gethvmcontext_partial;
-    domctl.domain = domid;
-    domctl.u.hvmcontext_partial.type = typecode;
-    domctl.u.hvmcontext_partial.instance = instance;
-    domctl.u.hvmcontext_partial.bufsz = size;
-    set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
-
-    ret = do_domctl(xch, &domctl);
-
-    xc_hypercall_bounce_post(xch, ctxt_buf);
-
-    return ret ? -1 : 0;
-}
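A hedged usage sketch of the partial-context call above (x86 HVM assumed; HVM_SAVE_CODE/HVM_SAVE_LENGTH and struct hvm_hw_cpu come from Xen's public save-record headers, while xch, domid and vcpu are caller-provided):

    struct hvm_hw_cpu cpu_ctx;

    /* sizeof(cpu_ctx) == HVM_SAVE_LENGTH(CPU), satisfying the size rule above. */
    if ( xc_domain_hvm_getcontext_partial(xch, domid, HVM_SAVE_CODE(CPU),
                                          vcpu, &cpu_ctx, sizeof(cpu_ctx)) )
        perror("xc_domain_hvm_getcontext_partial");
    else
        printf("vcpu%u rip=%#" PRIx64 "\n", vcpu, cpu_ctx.rip);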
-
-/* set info to hvm guest for restore */
-int xc_domain_hvm_setcontext(xc_interface *xch,
-                             uint32_t domid,
-                             uint8_t *ctxt_buf,
-                             uint32_t size)
-{
-    int ret;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_sethvmcontext;
-    domctl.domain = domid;
-    domctl.u.hvmcontext.size = size;
-    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
-
-    ret = do_domctl(xch, &domctl);
-
-    xc_hypercall_bounce_post(xch, ctxt_buf);
-
-    return ret;
-}
-
-int xc_vcpu_getcontext(xc_interface *xch,
-                       uint32_t domid,
-                       uint32_t vcpu,
-                       vcpu_guest_context_any_t *ctxt)
-{
-    int rc;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, ctxt) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_getvcpucontext;
-    domctl.domain = domid;
-    domctl.u.vcpucontext.vcpu   = (uint16_t)vcpu;
-    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
-
-    rc = do_domctl(xch, &domctl);
-
-    xc_hypercall_bounce_post(xch, ctxt);
-
-    return rc;
-}
-
-int xc_vcpu_get_extstate(xc_interface *xch,
-                         uint32_t domid,
-                         uint32_t vcpu,
-                         xc_vcpu_extstate_t *extstate)
-{
-    int rc = -ENODEV;
-#if defined (__i386__) || defined(__x86_64__)
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BUFFER(void, buffer);
-    bool get_state;
-
-    if ( !extstate )
-        return -EINVAL;
-
-    domctl.cmd = XEN_DOMCTL_getvcpuextstate;
-    domctl.domain = domid;
-    domctl.u.vcpuextstate.vcpu = (uint16_t)vcpu;
-    domctl.u.vcpuextstate.xfeature_mask = extstate->xfeature_mask;
-    domctl.u.vcpuextstate.size = extstate->size;
-
-    get_state = (extstate->size != 0);
-
-    if ( get_state )
-    {
-        buffer = xc_hypercall_buffer_alloc(xch, buffer, extstate->size);
-
-        if ( !buffer )
-        {
-            PERROR("Unable to allocate memory for vcpu%u's xsave context",
-                   vcpu);
-            rc = -ENOMEM;
-            goto out;
-        }
-
-        set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
-    }
-
-    rc = do_domctl(xch, &domctl);
-
-    if ( rc )
-        goto out;
-
-    /* A query for the size of buffer to use. */
-    if ( !extstate->size && !extstate->xfeature_mask )
-    {
-        extstate->xfeature_mask = domctl.u.vcpuextstate.xfeature_mask;
-        extstate->size = domctl.u.vcpuextstate.size;
-        goto out;
-    }
-
-    if ( get_state )
-        memcpy(extstate->buffer, buffer, extstate->size);
-
-out:
-    if ( get_state )
-        xc_hypercall_buffer_free(xch, buffer);
-#endif
-
-    return rc;
-}
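The size query handled above implies a two-call pattern; a minimal sketch, assuming x86, caller-provided xch/domid/vcpu and malloc() for the buffer:

    xc_vcpu_extstate_t ext = { .xfeature_mask = 0, .size = 0, .buffer = NULL };

    /* First call: size == 0 asks the hypervisor how large a buffer is needed. */
    if ( xc_vcpu_get_extstate(xch, domid, vcpu, &ext) )
        return -1;

    ext.buffer = malloc(ext.size);
    if ( !ext.buffer )
        return -1;

    /* Second call: fetch the xsave context into the caller's buffer. */
    if ( xc_vcpu_get_extstate(xch, domid, vcpu, &ext) )
        return -1;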
-
-int xc_watchdog(xc_interface *xch,
-                uint32_t id,
-                uint32_t timeout)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BUFFER(sched_watchdog_t, arg);
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_watchdog hypercall");
-        goto out1;
-    }
-
-    arg->id = id;
-    arg->timeout = timeout;
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_sched_op,
-                   SCHEDOP_watchdog,
-                   HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
- out1:
-    return ret;
-}
-
-
-int xc_shadow_control(xc_interface *xch,
-                      uint32_t domid,
-                      unsigned int sop,
-                      xc_hypercall_buffer_t *dirty_bitmap,
-                      unsigned long pages,
-                      unsigned long *mb,
-                      uint32_t mode,
-                      xc_shadow_op_stats_t *stats)
-{
-    int rc;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BUFFER_ARGUMENT(dirty_bitmap);
-
-    memset(&domctl, 0, sizeof(domctl));
-
-    domctl.cmd = XEN_DOMCTL_shadow_op;
-    domctl.domain = domid;
-    domctl.u.shadow_op.op     = sop;
-    domctl.u.shadow_op.pages  = pages;
-    domctl.u.shadow_op.mb     = mb ? *mb : 0;
-    domctl.u.shadow_op.mode   = mode;
-    if (dirty_bitmap != NULL)
-        set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap,
-                                dirty_bitmap);
-
-    rc = do_domctl(xch, &domctl);
-
-    if ( stats )
-        memcpy(stats, &domctl.u.shadow_op.stats,
-               sizeof(xc_shadow_op_stats_t));
-
-    if ( mb )
-        *mb = domctl.u.shadow_op.mb;
-
-    return (rc == 0) ? domctl.u.shadow_op.pages : rc;
-}
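One plausible way to drive xc_shadow_control() with the optional arguments left unused, sketched on the assumption that XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION from the public domctl interface is the desired query:

    unsigned long shadow_mb = 0;

    /* No dirty bitmap, page count or stats are needed for this query. */
    if ( xc_shadow_control(xch, domid, XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION,
                           NULL, 0, &shadow_mb, 0, NULL) < 0 )
        perror("xc_shadow_control");
    else
        printf("shadow allocation: %lu MB\n", shadow_mb);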
-
-int xc_domain_setmaxmem(xc_interface *xch,
-                        uint32_t domid,
-                        uint64_t max_memkb)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_max_mem;
-    domctl.domain = domid;
-    domctl.u.max_mem.max_memkb = max_memkb;
-    return do_domctl(xch, &domctl);
-}
-
-#if defined(__i386__) || defined(__x86_64__)
-int xc_domain_set_memory_map(xc_interface *xch,
-                               uint32_t domid,
-                               struct e820entry entries[],
-                               uint32_t nr_entries)
-{
-    int rc;
-    struct xen_foreign_memory_map fmap = {
-        .domid = domid,
-        .map = { .nr_entries = nr_entries }
-    };
-    DECLARE_HYPERCALL_BOUNCE(entries, nr_entries * sizeof(struct e820entry),
-                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( !entries || xc_hypercall_bounce_pre(xch, entries) )
-        return -1;
-
-    set_xen_guest_handle(fmap.map.buffer, entries);
-
-    rc = do_memory_op(xch, XENMEM_set_memory_map, &fmap, sizeof(fmap));
-
-    xc_hypercall_bounce_post(xch, entries);
-
-    return rc;
-}
-
-int xc_get_machine_memory_map(xc_interface *xch,
-                              struct e820entry entries[],
-                              uint32_t max_entries)
-{
-    int rc;
-    struct xen_memory_map memmap = {
-        .nr_entries = max_entries
-    };
-    DECLARE_HYPERCALL_BOUNCE(entries, sizeof(struct e820entry) * max_entries,
-                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( !entries || xc_hypercall_bounce_pre(xch, entries) || max_entries <= 1)
-        return -1;
-
-
-    set_xen_guest_handle(memmap.buffer, entries);
-
-    rc = do_memory_op(xch, XENMEM_machine_memory_map, &memmap, sizeof(memmap));
-
-    xc_hypercall_bounce_post(xch, entries);
-
-    return rc ? rc : memmap.nr_entries;
-}
-int xc_domain_set_memmap_limit(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long map_limitkb)
-{
-    struct e820entry e820;
-
-    e820.addr = 0;
-    e820.size = (uint64_t)map_limitkb << 10;
-    e820.type = E820_RAM;
-
-    return xc_domain_set_memory_map(xch, domid, &e820, 1);
-}
-#else
-int xc_domain_set_memmap_limit(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long map_limitkb)
-{
-    PERROR("Function not implemented");
-    errno = ENOSYS;
-    return -1;
-}
-#endif
-
-int xc_reserved_device_memory_map(xc_interface *xch,
-                                  uint32_t flags,
-                                  uint16_t seg,
-                                  uint8_t bus,
-                                  uint8_t devfn,
-                                  struct xen_reserved_device_memory entries[],
-                                  uint32_t *max_entries)
-{
-    int rc;
-    struct xen_reserved_device_memory_map xrdmmap = {
-        .flags = flags,
-        .dev.pci.seg = seg,
-        .dev.pci.bus = bus,
-        .dev.pci.devfn = devfn,
-        .nr_entries = *max_entries
-    };
-    DECLARE_HYPERCALL_BOUNCE(entries,
-                             sizeof(struct xen_reserved_device_memory) *
-                             *max_entries, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, entries) )
-        return -1;
-
-    set_xen_guest_handle(xrdmmap.buffer, entries);
-
-    rc = do_memory_op(xch, XENMEM_reserved_device_memory_map,
-                      &xrdmmap, sizeof(xrdmmap));
-
-    xc_hypercall_bounce_post(xch, entries);
-
-    *max_entries = xrdmmap.nr_entries;
-
-    return rc;
-}
-
-int xc_domain_set_time_offset(xc_interface *xch,
-                              uint32_t domid,
-                              int32_t time_offset_seconds)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_settimeoffset;
-    domctl.domain = domid;
-    domctl.u.settimeoffset.time_offset_seconds = time_offset_seconds;
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_disable_migrate(xc_interface *xch, uint32_t domid)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_disable_migrate;
-    domctl.domain = domid;
-    domctl.u.disable_migrate.disable = 1;
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_set_tsc_info(xc_interface *xch,
-                           uint32_t domid,
-                           uint32_t tsc_mode,
-                           uint64_t elapsed_nsec,
-                           uint32_t gtsc_khz,
-                           uint32_t incarnation)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_settscinfo;
-    domctl.domain = domid;
-    domctl.u.tsc_info.tsc_mode = tsc_mode;
-    domctl.u.tsc_info.elapsed_nsec = elapsed_nsec;
-    domctl.u.tsc_info.gtsc_khz = gtsc_khz;
-    domctl.u.tsc_info.incarnation = incarnation;
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_get_tsc_info(xc_interface *xch,
-                           uint32_t domid,
-                           uint32_t *tsc_mode,
-                           uint64_t *elapsed_nsec,
-                           uint32_t *gtsc_khz,
-                           uint32_t *incarnation)
-{
-    int rc;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_gettscinfo;
-    domctl.domain = domid;
-    rc = do_domctl(xch, &domctl);
-    if ( rc == 0 )
-    {
-        *tsc_mode = domctl.u.tsc_info.tsc_mode;
-        *elapsed_nsec = domctl.u.tsc_info.elapsed_nsec;
-        *gtsc_khz = domctl.u.tsc_info.gtsc_khz;
-        *incarnation = domctl.u.tsc_info.incarnation;
-    }
-    return rc;
-}
-
-
-int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
-{
-    long rc = do_memory_op(xch, XENMEM_maximum_gpfn, &domid, sizeof(domid));
-
-    if ( rc >= 0 )
-    {
-        *gpfns = rc;
-        rc = 0;
-    }
-    return rc;
-}
-
-int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
-{
-    int rc = xc_domain_maximum_gpfn(xch, domid, gpfns);
-
-    if ( rc >= 0 )
-        *gpfns += 1;
-
-    return rc;
-}
-
-int xc_domain_increase_reservation(xc_interface *xch,
-                                   uint32_t domid,
-                                   unsigned long nr_extents,
-                                   unsigned int extent_order,
-                                   unsigned int mem_flags,
-                                   xen_pfn_t *extent_start)
-{
-    int err;
-    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    struct xen_memory_reservation reservation = {
-        .nr_extents   = nr_extents,
-        .extent_order = extent_order,
-        .mem_flags    = mem_flags,
-        .domid        = domid
-    };
-
-    /* may be NULL */
-    if ( xc_hypercall_bounce_pre(xch, extent_start) )
-    {
-        PERROR("Could not bounce memory for XENMEM_increase_reservation hypercall");
-        return -1;
-    }
-
-    set_xen_guest_handle(reservation.extent_start, extent_start);
-
-    err = do_memory_op(xch, XENMEM_increase_reservation, &reservation, sizeof(reservation));
-
-    xc_hypercall_bounce_post(xch, extent_start);
-
-    return err;
-}
-
-int xc_domain_increase_reservation_exact(xc_interface *xch,
-                                         uint32_t domid,
-                                         unsigned long nr_extents,
-                                         unsigned int extent_order,
-                                         unsigned int mem_flags,
-                                         xen_pfn_t *extent_start)
-{
-    int err;
-
-    err = xc_domain_increase_reservation(xch, domid, nr_extents,
-                                         extent_order, mem_flags, extent_start);
-
-    if ( err == nr_extents )
-        return 0;
-
-    if ( err >= 0 )
-    {
-        DPRINTF("Failed allocation for dom %d: "
-                "%ld extents of order %d, mem_flags %x\n",
-                domid, nr_extents, extent_order, mem_flags);
-        errno = ENOMEM;
-        err = -1;
-    }
-
-    return err;
-}
-
-int xc_domain_decrease_reservation(xc_interface *xch,
-                                   uint32_t domid,
-                                   unsigned long nr_extents,
-                                   unsigned int extent_order,
-                                   xen_pfn_t *extent_start)
-{
-    int err;
-    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    struct xen_memory_reservation reservation = {
-        .nr_extents   = nr_extents,
-        .extent_order = extent_order,
-        .mem_flags    = 0,
-        .domid        = domid
-    };
-
-    if ( extent_start == NULL )
-    {
-        DPRINTF("decrease_reservation extent_start is NULL!\n");
-        errno = EINVAL;
-        return -1;
-    }
-
-    if ( xc_hypercall_bounce_pre(xch, extent_start) )
-    {
-        PERROR("Could not bounce memory for XENMEM_decrease_reservation hypercall");
-        return -1;
-    }
-    set_xen_guest_handle(reservation.extent_start, extent_start);
-
-    err = do_memory_op(xch, XENMEM_decrease_reservation, &reservation, sizeof(reservation));
-
-    xc_hypercall_bounce_post(xch, extent_start);
-
-    return err;
-}
-
-int xc_domain_decrease_reservation_exact(xc_interface *xch,
-                                         uint32_t domid,
-                                         unsigned long nr_extents,
-                                         unsigned int extent_order,
-                                         xen_pfn_t *extent_start)
-{
-    int err;
-
-    err = xc_domain_decrease_reservation(xch, domid, nr_extents,
-                                         extent_order, extent_start);
-
-    if ( err == nr_extents )
-        return 0;
-
-    if ( err >= 0 )
-    {
-        DPRINTF("Failed deallocation for dom %d: %ld extents of order %d\n",
-                domid, nr_extents, extent_order);
-        errno = EINVAL;
-        err = -1;
-    }
-
-    return err;
-}
-
-int xc_domain_add_to_physmap(xc_interface *xch,
-                             uint32_t domid,
-                             unsigned int space,
-                             unsigned long idx,
-                             xen_pfn_t gpfn)
-{
-    struct xen_add_to_physmap xatp = {
-        .domid = domid,
-        .space = space,
-        .idx = idx,
-        .gpfn = gpfn,
-    };
-    return do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
-}
-
-int xc_domain_add_to_physmap_batch(xc_interface *xch,
-                                   uint32_t domid,
-                                   uint32_t foreign_domid,
-                                   unsigned int space,
-                                   unsigned int size,
-                                   xen_ulong_t *idxs,
-                                   xen_pfn_t *gpfns,
-                                   int *errs)
-{
-    int rc;
-    DECLARE_HYPERCALL_BOUNCE(idxs, size * sizeof(*idxs), XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    DECLARE_HYPERCALL_BOUNCE(gpfns, size * sizeof(*gpfns), XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    DECLARE_HYPERCALL_BOUNCE(errs, size * sizeof(*errs), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    struct xen_add_to_physmap_batch xatp_batch = {
-        .domid = domid,
-        .space = space,
-        .size = size,
-        .u = { .foreign_domid = foreign_domid }
-    };
-
-    if ( xc_hypercall_bounce_pre(xch, idxs)  ||
-         xc_hypercall_bounce_pre(xch, gpfns) ||
-         xc_hypercall_bounce_pre(xch, errs)  )
-    {
-        PERROR("Could not bounce memory for XENMEM_add_to_physmap_batch");
-        rc = -1;
-        goto out;
-    }
-
-    set_xen_guest_handle(xatp_batch.idxs, idxs);
-    set_xen_guest_handle(xatp_batch.gpfns, gpfns);
-    set_xen_guest_handle(xatp_batch.errs, errs);
-
-    rc = do_memory_op(xch, XENMEM_add_to_physmap_batch,
-                      &xatp_batch, sizeof(xatp_batch));
-
-out:
-    xc_hypercall_bounce_post(xch, idxs);
-    xc_hypercall_bounce_post(xch, gpfns);
-    xc_hypercall_bounce_post(xch, errs);
-
-    return rc;
-}
-
-int xc_domain_remove_from_physmap(xc_interface *xch,
-                                  uint32_t domid,
-                                  xen_pfn_t gpfn)
-{
-    struct xen_remove_from_physmap xrfp = {
-        .domid = domid,
-        .gpfn = gpfn,
-    };
-    return do_memory_op(xch, XENMEM_remove_from_physmap, &xrfp, sizeof(xrfp));
-}
-
-int xc_domain_claim_pages(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long nr_pages)
-{
-    int err;
-    struct xen_memory_reservation reservation = {
-        .nr_extents   = nr_pages,
-        .extent_order = 0,
-        .mem_flags    = 0, /* no flags */
-        .domid        = domid
-    };
-
-    set_xen_guest_handle(reservation.extent_start, HYPERCALL_BUFFER_NULL);
-
-    err = do_memory_op(xch, XENMEM_claim_pages, &reservation, sizeof(reservation));
-    /* Ignore it if the hypervisor does not support the call. */
-    if (err == -1 && errno == ENOSYS)
-        err = errno = 0;
-    return err;
-}
-
-int xc_domain_populate_physmap(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long nr_extents,
-                               unsigned int extent_order,
-                               unsigned int mem_flags,
-                               xen_pfn_t *extent_start)
-{
-    int err;
-    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    struct xen_memory_reservation reservation = {
-        .nr_extents   = nr_extents,
-        .extent_order = extent_order,
-        .mem_flags    = mem_flags,
-        .domid        = domid
-    };
-
-    if ( xc_hypercall_bounce_pre(xch, extent_start) )
-    {
-        PERROR("Could not bounce memory for XENMEM_populate_physmap hypercall");
-        return -1;
-    }
-    set_xen_guest_handle(reservation.extent_start, extent_start);
-
-    err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
-
-    xc_hypercall_bounce_post(xch, extent_start);
-    return err;
-}
-
-int xc_domain_populate_physmap_exact(xc_interface *xch,
-                                     uint32_t domid,
-                                     unsigned long nr_extents,
-                                     unsigned int extent_order,
-                                     unsigned int mem_flags,
-                                     xen_pfn_t *extent_start)
-{
-    int err;
-
-    err = xc_domain_populate_physmap(xch, domid, nr_extents,
-                                     extent_order, mem_flags, extent_start);
-    if ( err == nr_extents )
-        return 0;
-
-    if ( err >= 0 )
-    {
-        DPRINTF("Failed allocation for dom %d: %ld extents of order %d\n",
-                domid, nr_extents, extent_order);
-        errno = EBUSY;
-        err = -1;
-    }
-
-    return err;
-}
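A hedged sketch of the exact-populate helper above: place one 4k page at a caller-chosen guest frame gpfn (an assumed variable); on success the hypervisor writes the base frame of the allocated extent back into the array.

    xen_pfn_t extent[1] = { gpfn };

    if ( xc_domain_populate_physmap_exact(xch, domid, 1 /* nr_extents */,
                                          0 /* order */, 0 /* mem_flags */,
                                          extent) )
        perror("xc_domain_populate_physmap_exact");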
-
-int xc_domain_memory_exchange_pages(xc_interface *xch,
-                                    uint32_t domid,
-                                    unsigned long nr_in_extents,
-                                    unsigned int in_order,
-                                    xen_pfn_t *in_extents,
-                                    unsigned long nr_out_extents,
-                                    unsigned int out_order,
-                                    xen_pfn_t *out_extents)
-{
-    int rc = -1;
-    DECLARE_HYPERCALL_BOUNCE(in_extents, nr_in_extents*sizeof(*in_extents), XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    DECLARE_HYPERCALL_BOUNCE(out_extents, nr_out_extents*sizeof(*out_extents), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    struct xen_memory_exchange exchange = {
-        .in = {
-            .nr_extents   = nr_in_extents,
-            .extent_order = in_order,
-            .domid        = domid
-        },
-        .out = {
-            .nr_extents   = nr_out_extents,
-            .extent_order = out_order,
-            .domid        = domid
-        }
-    };
-
-    if ( xc_hypercall_bounce_pre(xch, in_extents) ||
-         xc_hypercall_bounce_pre(xch, out_extents))
-        goto out;
-
-    set_xen_guest_handle(exchange.in.extent_start, in_extents);
-    set_xen_guest_handle(exchange.out.extent_start, out_extents);
-
-    rc = do_memory_op(xch, XENMEM_exchange, &exchange, sizeof(exchange));
-
-out:
-    xc_hypercall_bounce_post(xch, in_extents);
-    xc_hypercall_bounce_post(xch, out_extents);
-
-    return rc;
-}
-
-/* Currently only implemented on x86. Because of the broken error
- * reporting style this cannot be handled in the caller, e.g. by
- * looking for errno==ENOSYS. Once the reporting is fixed, this
- * condition can be removed.
- */
-#if defined(__i386__)||defined(__x86_64__)
-static int xc_domain_pod_target(xc_interface *xch,
-                                int op,
-                                uint32_t domid,
-                                uint64_t target_pages,
-                                uint64_t *tot_pages,
-                                uint64_t *pod_cache_pages,
-                                uint64_t *pod_entries)
-{
-    int err;
-
-    struct xen_pod_target pod_target = {
-        .domid = domid,
-        .target_pages = target_pages
-    };
-
-    err = do_memory_op(xch, op, &pod_target, sizeof(pod_target));
-
-    if ( err < 0 )
-    {
-        DPRINTF("Failed %s_pod_target dom %d\n",
-                (op==XENMEM_set_pod_target)?"set":"get",
-                domid);
-        errno = -err;
-        err = -1;
-    }
-    else
-        err = 0;
-
-    if ( tot_pages )
-        *tot_pages = pod_target.tot_pages;
-    if ( pod_cache_pages )
-        *pod_cache_pages = pod_target.pod_cache_pages;
-    if ( pod_entries )
-        *pod_entries = pod_target.pod_entries;
-
-    return err;
-}
-
-
-int xc_domain_set_pod_target(xc_interface *xch,
-                             uint32_t domid,
-                             uint64_t target_pages,
-                             uint64_t *tot_pages,
-                             uint64_t *pod_cache_pages,
-                             uint64_t *pod_entries)
-{
-    return xc_domain_pod_target(xch,
-                                XENMEM_set_pod_target,
-                                domid,
-                                target_pages,
-                                tot_pages,
-                                pod_cache_pages,
-                                pod_entries);
-}
-
-int xc_domain_get_pod_target(xc_interface *xch,
-                             uint32_t domid,
-                             uint64_t *tot_pages,
-                             uint64_t *pod_cache_pages,
-                             uint64_t *pod_entries)
-{
-    return xc_domain_pod_target(xch,
-                                XENMEM_get_pod_target,
-                                domid,
-                                -1,
-                                tot_pages,
-                                pod_cache_pages,
-                                pod_entries);
-}
-#else
-int xc_domain_set_pod_target(xc_interface *xch,
-                             uint32_t domid,
-                             uint64_t target_pages,
-                             uint64_t *tot_pages,
-                             uint64_t *pod_cache_pages,
-                             uint64_t *pod_entries)
-{
-    return 0;
-}
-int xc_domain_get_pod_target(xc_interface *xch,
-                             uint32_t domid,
-                             uint64_t *tot_pages,
-                             uint64_t *pod_cache_pages,
-                             uint64_t *pod_entries)
-{
-    /* On x86 (above) xc_domain_pod_target will incorrectly return -1
-     * with errno==-1 on error. Do the same for least surprise. */
-    errno = -1;
-    return -1;
-}
-#endif
-
-int xc_domain_max_vcpus(xc_interface *xch, uint32_t domid, unsigned int max)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_max_vcpus;
-    domctl.domain = domid;
-    domctl.u.max_vcpus.max    = max;
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
-                        xen_domain_handle_t handle)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_setdomainhandle;
-    domctl.domain = domid;
-    memcpy(domctl.u.setdomainhandle.handle, handle,
-           sizeof(xen_domain_handle_t));
-    return do_domctl(xch, &domctl);
-}
-
-int xc_vcpu_getinfo(xc_interface *xch,
-                    uint32_t domid,
-                    uint32_t vcpu,
-                    xc_vcpuinfo_t *info)
-{
-    int rc;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
-    domctl.domain = domid;
-    domctl.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
-
-    rc = do_domctl(xch, &domctl);
-
-    memcpy(info, &domctl.u.getvcpuinfo, sizeof(*info));
-
-    return rc;
-}
-
-int xc_domain_ioport_permission(xc_interface *xch,
-                                uint32_t domid,
-                                uint32_t first_port,
-                                uint32_t nr_ports,
-                                uint32_t allow_access)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_ioport_permission;
-    domctl.domain = domid;
-    domctl.u.ioport_permission.first_port = first_port;
-    domctl.u.ioport_permission.nr_ports = nr_ports;
-    domctl.u.ioport_permission.allow_access = allow_access;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_availheap(xc_interface *xch,
-                 int min_width,
-                 int max_width,
-                 int node,
-                 uint64_t *bytes)
-{
-    DECLARE_SYSCTL;
-    int rc;
-
-    sysctl.cmd = XEN_SYSCTL_availheap;
-    sysctl.u.availheap.min_bitwidth = min_width;
-    sysctl.u.availheap.max_bitwidth = max_width;
-    sysctl.u.availheap.node = node;
-
-    rc = xc_sysctl(xch, &sysctl);
-
-    *bytes = sysctl.u.availheap.avail_bytes;
-
-    return rc;
-}
-
-int xc_vcpu_setcontext(xc_interface *xch,
-                       uint32_t domid,
-                       uint32_t vcpu,
-                       vcpu_guest_context_any_t *ctxt)
-{
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    int rc;
-
-    if ( xc_hypercall_bounce_pre(xch, ctxt) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_setvcpucontext;
-    domctl.domain = domid;
-    domctl.u.vcpucontext.vcpu = vcpu;
-    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
-
-    rc = do_domctl(xch, &domctl);
-
-    xc_hypercall_bounce_post(xch, ctxt);
-
-    return rc;
-}
-
-int xc_domain_irq_permission(xc_interface *xch,
-                             uint32_t domid,
-                             uint8_t pirq,
-                             uint8_t allow_access)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_irq_permission;
-    domctl.domain = domid;
-    domctl.u.irq_permission.pirq = pirq;
-    domctl.u.irq_permission.allow_access = allow_access;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_iomem_permission(xc_interface *xch,
-                               uint32_t domid,
-                               unsigned long first_mfn,
-                               unsigned long nr_mfns,
-                               uint8_t allow_access)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_iomem_permission;
-    domctl.domain = domid;
-    domctl.u.iomem_permission.first_mfn = first_mfn;
-    domctl.u.iomem_permission.nr_mfns = nr_mfns;
-    domctl.u.iomem_permission.allow_access = allow_access;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_send_trigger(xc_interface *xch,
-                           uint32_t domid,
-                           uint32_t trigger,
-                           uint32_t vcpu)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_sendtrigger;
-    domctl.domain = domid;
-    domctl.u.sendtrigger.trigger = trigger;
-    domctl.u.sendtrigger.vcpu = vcpu;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value)
-{
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_param_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = dom;
-    arg->index = param;
-    arg->value = value;
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_param,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value)
-{
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_param_t, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
-
-    arg->domid = dom;
-    arg->index = param;
-    rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_get_param,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-    *value = arg->value;
-    xc_hypercall_buffer_free(handle, arg);
-    return rc;
-}
-
-int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value)
-{
-    return xc_hvm_param_set(handle, dom, param, value);
-}
-
-int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value)
-{
-    uint64_t v;
-    int ret;
-
-    ret = xc_hvm_param_get(handle, dom, param, &v);
-    if (ret < 0)
-        return ret;
-    *value = v;
-    return 0;
-}
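A sketch of the parameter accessors above, using HVM_PARAM_STORE_PFN from Xen's public HVM params header purely as an example index:

    uint64_t store_pfn;

    if ( xc_hvm_param_get(xch, domid, HVM_PARAM_STORE_PFN, &store_pfn) )
        perror("xc_hvm_param_get");
    else if ( xc_hvm_param_set(xch, domid, HVM_PARAM_STORE_PFN, store_pfn) )
        perror("xc_hvm_param_set");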
-
-int xc_domain_setdebugging(xc_interface *xch,
-                           uint32_t domid,
-                           unsigned int enable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_setdebugging;
-    domctl.domain = domid;
-    domctl.u.setdebugging.enable = enable;
-    return do_domctl(xch, &domctl);
-}
-
-int xc_assign_device(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t machine_sbdf,
-    uint32_t flags)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_assign_device;
-    domctl.domain = domid;
-    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_PCI;
-    domctl.u.assign_device.u.pci.machine_sbdf = machine_sbdf;
-    domctl.u.assign_device.flags = flags;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_get_device_group(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t machine_sbdf,
-    uint32_t max_sdevs,
-    uint32_t *num_sdevs,
-    uint32_t *sdev_array)
-{
-    int rc;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(sdev_array, max_sdevs * sizeof(*sdev_array), XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, sdev_array) )
-    {
-        PERROR("Could not bounce buffer for xc_get_device_group");
-        return -1;
-    }
-
-    domctl.cmd = XEN_DOMCTL_get_device_group;
-    domctl.domain = domid;
-
-    domctl.u.get_device_group.machine_sbdf = machine_sbdf;
-    domctl.u.get_device_group.max_sdevs = max_sdevs;
-
-    set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);
-
-    rc = do_domctl(xch, &domctl);
-
-    *num_sdevs = domctl.u.get_device_group.num_sdevs;
-
-    xc_hypercall_bounce_post(xch, sdev_array);
-
-    return rc;
-}
-
-int xc_test_assign_device(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t machine_sbdf)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_test_assign_device;
-    domctl.domain = domid;
-    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_PCI;
-    domctl.u.assign_device.u.pci.machine_sbdf = machine_sbdf;
-    domctl.u.assign_device.flags = 0;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_deassign_device(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t machine_sbdf)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_deassign_device;
-    domctl.domain = domid;
-    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_PCI;
-    domctl.u.assign_device.u.pci.machine_sbdf = machine_sbdf;
-    domctl.u.assign_device.flags = 0;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_assign_dt_device(
-    xc_interface *xch,
-    uint32_t domid,
-    char *path)
-{
-    int rc;
-    size_t size = strlen(path);
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, path) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_assign_device;
-    domctl.domain = domid;
-
-    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
-    domctl.u.assign_device.u.dt.size = size;
-    /*
-     * DT devices don't own any RDM, so no assignment flags apply here;
-     * simply pass 0.
-     */
-    domctl.u.assign_device.flags = 0;
-    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);
-
-    rc = do_domctl(xch, &domctl);
-
-    xc_hypercall_bounce_post(xch, path);
-
-    return rc;
-}
-
-int xc_test_assign_dt_device(
-    xc_interface *xch,
-    uint32_t domid,
-    char *path)
-{
-    int rc;
-    size_t size = strlen(path);
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, path) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_test_assign_device;
-    domctl.domain = domid;
-
-    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
-    domctl.u.assign_device.u.dt.size = size;
-    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);
-    domctl.u.assign_device.flags = 0;
-
-    rc = do_domctl(xch, &domctl);
-
-    xc_hypercall_bounce_post(xch, path);
-
-    return rc;
-}
-
-int xc_deassign_dt_device(
-    xc_interface *xch,
-    uint32_t domid,
-    char *path)
-{
-    int rc;
-    size_t size = strlen(path);
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(path, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, path) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_deassign_device;
-    domctl.domain = domid;
-
-    domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
-    domctl.u.assign_device.u.dt.size = size;
-    set_xen_guest_handle(domctl.u.assign_device.u.dt.path, path);
-    domctl.u.assign_device.flags = 0;
-
-    rc = do_domctl(xch, &domctl);
-
-    xc_hypercall_bounce_post(xch, path);
-
-    return rc;
-}
-
-
-
-
-int xc_domain_update_msi_irq(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t gvec,
-    uint32_t pirq,
-    uint32_t gflags,
-    uint64_t gtable)
-{
-    int rc;
-    struct xen_domctl_bind_pt_irq *bind;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_bind_pt_irq;
-    domctl.domain = domid;
-
-    bind = &(domctl.u.bind_pt_irq);
-    bind->irq_type = PT_IRQ_TYPE_MSI;
-    bind->machine_irq = pirq;
-    bind->u.msi.gvec = gvec;
-    bind->u.msi.gflags = gflags;
-    bind->u.msi.gtable = gtable;
-
-    rc = do_domctl(xch, &domctl);
-    return rc;
-}
-
-int xc_domain_unbind_msi_irq(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t gvec,
-    uint32_t pirq,
-    uint32_t gflags)
-{
-    int rc;
-    struct xen_domctl_bind_pt_irq *bind;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
-    domctl.domain = domid;
-
-    bind = &(domctl.u.bind_pt_irq);
-    bind->irq_type = PT_IRQ_TYPE_MSI;
-    bind->machine_irq = pirq;
-    bind->u.msi.gvec = gvec;
-    bind->u.msi.gflags = gflags;
-
-    rc = do_domctl(xch, &domctl);
-    return rc;
-}
-
-/* Pass-through: bind a machine irq to a guest irq. */
-static int xc_domain_bind_pt_irq_int(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t machine_irq,
-    uint8_t irq_type,
-    uint8_t bus,
-    uint8_t device,
-    uint8_t intx,
-    uint8_t isa_irq,
-    uint16_t spi)
-{
-    int rc;
-    struct xen_domctl_bind_pt_irq *bind;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_bind_pt_irq;
-    domctl.domain = domid;
-
-    bind = &(domctl.u.bind_pt_irq);
-    bind->irq_type = irq_type;
-    bind->machine_irq = machine_irq;
-    switch ( irq_type )
-    {
-    case PT_IRQ_TYPE_PCI:
-    case PT_IRQ_TYPE_MSI_TRANSLATE:
-        bind->u.pci.bus = bus;
-        bind->u.pci.device = device;
-        bind->u.pci.intx = intx;
-        break;
-    case PT_IRQ_TYPE_ISA:
-        bind->u.isa.isa_irq = isa_irq;
-        break;
-    case PT_IRQ_TYPE_SPI:
-        bind->u.spi.spi = spi;
-        break;
-    default:
-        errno = EINVAL;
-        return -1;
-    }
-
-    rc = do_domctl(xch, &domctl);
-    return rc;
-}
-
-int xc_domain_bind_pt_irq(
-    xc_interface *xch,
-    uint32_t domid,
-    uint8_t machine_irq,
-    uint8_t irq_type,
-    uint8_t bus,
-    uint8_t device,
-    uint8_t intx,
-    uint8_t isa_irq)
-{
-    return xc_domain_bind_pt_irq_int(xch, domid, machine_irq, irq_type,
-                                     bus, device, intx, isa_irq, 0);
-}
-
-static int xc_domain_unbind_pt_irq_int(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t machine_irq,
-    uint8_t irq_type,
-    uint8_t bus,
-    uint8_t device,
-    uint8_t intx,
-    uint8_t isa_irq,
-    uint8_t spi)
-{
-    int rc;
-    struct xen_domctl_bind_pt_irq *bind;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
-    domctl.domain = domid;
-
-    bind = &(domctl.u.bind_pt_irq);
-    bind->irq_type = irq_type;
-    bind->machine_irq = machine_irq;
-    switch ( irq_type )
-    {
-    case PT_IRQ_TYPE_PCI:
-    case PT_IRQ_TYPE_MSI_TRANSLATE:
-        bind->u.pci.bus = bus;
-        bind->u.pci.device = device;
-        bind->u.pci.intx = intx;
-        break;
-    case PT_IRQ_TYPE_ISA:
-        bind->u.isa.isa_irq = isa_irq;
-        break;
-    case PT_IRQ_TYPE_SPI:
-        bind->u.spi.spi = spi;
-        break;
-    default:
-        errno = EINVAL;
-        return -1;
-    }
-
-    rc = do_domctl(xch, &domctl);
-    return rc;
-}
-
-int xc_domain_unbind_pt_irq(
-    xc_interface *xch,
-    uint32_t domid,
-    uint8_t machine_irq,
-    uint8_t irq_type,
-    uint8_t bus,
-    uint8_t device,
-    uint8_t intx,
-    uint8_t isa_irq)
-{
-    return xc_domain_unbind_pt_irq_int(xch, domid, machine_irq, irq_type,
-                                       bus, device, intx, isa_irq, 0);
-}
-
-int xc_domain_bind_pt_pci_irq(
-    xc_interface *xch,
-    uint32_t domid,
-    uint8_t machine_irq,
-    uint8_t bus,
-    uint8_t device,
-    uint8_t intx)
-{
-
-    return (xc_domain_bind_pt_irq(xch, domid, machine_irq,
-                                  PT_IRQ_TYPE_PCI, bus, device, intx, 0));
-}
-
-int xc_domain_bind_pt_isa_irq(
-    xc_interface *xch,
-    uint32_t domid,
-    uint8_t machine_irq)
-{
-
-    return (xc_domain_bind_pt_irq(xch, domid, machine_irq,
-                                  PT_IRQ_TYPE_ISA, 0, 0, 0, machine_irq));
-}
-
-int xc_domain_bind_pt_spi_irq(
-    xc_interface *xch,
-    uint32_t domid,
-    uint16_t vspi,
-    uint16_t spi)
-{
-    return (xc_domain_bind_pt_irq_int(xch, domid, vspi,
-                                      PT_IRQ_TYPE_SPI, 0, 0, 0, 0, spi));
-}
-
-int xc_domain_unbind_pt_spi_irq(xc_interface *xch,
-                                uint32_t domid,
-                                uint16_t vspi,
-                                uint16_t spi)
-{
-    return (xc_domain_unbind_pt_irq_int(xch, domid, vspi,
-                                        PT_IRQ_TYPE_SPI, 0, 0, 0, 0, spi));
-}
-
-int xc_domain_memory_mapping(
-    xc_interface *xch,
-    uint32_t domid,
-    unsigned long first_gfn,
-    unsigned long first_mfn,
-    unsigned long nr_mfns,
-    uint32_t add_mapping)
-{
-    DECLARE_DOMCTL;
-    xc_dominfo_t info;
-    int ret = 0, rc;
-    unsigned long done = 0, nr, max_batch_sz;
-
-    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
-         info.domid != domid )
-    {
-        PERROR("Could not get info for domain");
-        return -EINVAL;
-    }
-    if ( !xc_core_arch_auto_translated_physmap(&info) )
-        return 0;
-
-    if ( !nr_mfns )
-        return 0;
-
-    domctl.cmd = XEN_DOMCTL_memory_mapping;
-    domctl.domain = domid;
-    domctl.u.memory_mapping.add_mapping = add_mapping;
-    max_batch_sz = nr_mfns;
-    do
-    {
-        nr = min_t(unsigned long, nr_mfns - done, max_batch_sz);
-        domctl.u.memory_mapping.nr_mfns = nr;
-        domctl.u.memory_mapping.first_gfn = first_gfn + done;
-        domctl.u.memory_mapping.first_mfn = first_mfn + done;
-        rc = do_domctl(xch, &domctl);
-        if ( rc < 0 && errno == E2BIG )
-        {
-            if ( max_batch_sz <= 1 )
-                break;
-            max_batch_sz >>= 1;
-            continue;
-        }
-        if ( rc > 0 )
-        {
-            done += rc;
-            continue;
-        }
-        /* Save the first error... */
-        if ( !ret )
-            ret = rc;
-        /* .. and ignore the rest of them when removing. */
-        if ( rc && add_mapping != DPCI_REMOVE_MAPPING )
-            break;
-
-        done += nr;
-    } while ( done < nr_mfns );
-
-    /*
-     * Unless we were removing mappings, undo what we have done by
-     * unmapping the entire region.  Errors here are ignored.
-     */
-    if ( ret && add_mapping != DPCI_REMOVE_MAPPING )
-        xc_domain_memory_mapping(xch, domid, first_gfn, first_mfn, nr_mfns,
-                                 DPCI_REMOVE_MAPPING);
-
-    /* We might get E2BIG so many times that we never advance. */
-    if ( !done && !ret )
-        ret = -1;
-
-    return ret;
-}
-
-int xc_domain_ioport_mapping(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t first_gport,
-    uint32_t first_mport,
-    uint32_t nr_ports,
-    uint32_t add_mapping)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_ioport_mapping;
-    domctl.domain = domid;
-    domctl.u.ioport_mapping.first_gport = first_gport;
-    domctl.u.ioport_mapping.first_mport = first_mport;
-    domctl.u.ioport_mapping.nr_ports = nr_ports;
-    domctl.u.ioport_mapping.add_mapping = add_mapping;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_set_target(
-    xc_interface *xch,
-    uint32_t domid,
-    uint32_t target)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_set_target;
-    domctl.domain = domid;
-    domctl.u.set_target.target = target;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_subscribe_for_suspend(
-    xc_interface *xch, uint32_t dom, evtchn_port_t port)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_subscribe;
-    domctl.domain = dom;
-    domctl.u.subscribe.port = port;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_debug_control(xc_interface *xc, uint32_t domid, uint32_t sop, uint32_t vcpu)
-{
-    DECLARE_DOMCTL;
-
-    memset(&domctl, 0, sizeof(domctl));
-    domctl.domain = domid;
-    domctl.cmd = XEN_DOMCTL_debug_op;
-    domctl.u.debug_op.op     = sop;
-    domctl.u.debug_op.vcpu   = vcpu;
-
-    return do_domctl(xc, &domctl);
-}
-
-int xc_domain_p2m_audit(xc_interface *xch,
-                        uint32_t domid,
-                        uint64_t *orphans,
-                        uint64_t *m2p_bad,
-                        uint64_t *p2m_bad)
-{
-    DECLARE_DOMCTL;
-    int rc;
-
-    domctl.cmd = XEN_DOMCTL_audit_p2m;
-    domctl.domain = domid;
-    rc = do_domctl(xch, &domctl);
-
-    *orphans = domctl.u.audit_p2m.orphans;
-    *m2p_bad = domctl.u.audit_p2m.m2p_bad;
-    *p2m_bad = domctl.u.audit_p2m.p2m_bad;
-
-    return rc;
-}
-
-int xc_domain_set_access_required(xc_interface *xch,
-                                  uint32_t domid,
-                                  unsigned int required)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_set_access_required;
-    domctl.domain = domid;
-    domctl.u.access_required.access_required = required;
-    return do_domctl(xch, &domctl);
-}
-
-int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_set_virq_handler;
-    domctl.domain = domid;
-    domctl.u.set_virq_handler.virq = virq;
-    return do_domctl(xch, &domctl);
-}
-
-/* Plumbing Xen with vNUMA topology */
-int xc_domain_setvnuma(xc_interface *xch,
-                       uint32_t domid,
-                       uint32_t nr_vnodes,
-                       uint32_t nr_vmemranges,
-                       uint32_t nr_vcpus,
-                       xen_vmemrange_t *vmemrange,
-                       unsigned int *vdistance,
-                       unsigned int *vcpu_to_vnode,
-                       unsigned int *vnode_to_pnode)
-{
-    int rc;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * nr_vmemranges,
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
-                             nr_vnodes * nr_vnodes,
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) *
-                             nr_vnodes,
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    errno = EINVAL;
-
-    if ( nr_vnodes == 0 || nr_vmemranges == 0 || nr_vcpus == 0 )
-        return -1;
-
-    if ( !vdistance || !vcpu_to_vnode || !vmemrange || !vnode_to_pnode )
-    {
-        PERROR("%s: Can't set vnuma without initializing topology", __func__);
-        return -1;
-    }
-
-    if ( xc_hypercall_bounce_pre(xch, vmemrange)      ||
-         xc_hypercall_bounce_pre(xch, vdistance)      ||
-         xc_hypercall_bounce_pre(xch, vcpu_to_vnode)  ||
-         xc_hypercall_bounce_pre(xch, vnode_to_pnode) )
-    {
-        rc = -1;
-        goto vnumaset_fail;
-
-    }
-
-    set_xen_guest_handle(domctl.u.vnuma.vmemrange, vmemrange);
-    set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
-    set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
-    set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);
-
-    domctl.cmd = XEN_DOMCTL_setvnumainfo;
-    domctl.domain = domid;
-    domctl.u.vnuma.nr_vnodes = nr_vnodes;
-    domctl.u.vnuma.nr_vmemranges = nr_vmemranges;
-    domctl.u.vnuma.nr_vcpus = nr_vcpus;
-    domctl.u.vnuma.pad = 0;
-
-    rc = do_domctl(xch, &domctl);
-
- vnumaset_fail:
-    xc_hypercall_bounce_post(xch, vmemrange);
-    xc_hypercall_bounce_post(xch, vdistance);
-    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
-    xc_hypercall_bounce_post(xch, vnode_to_pnode);
-
-    return rc;
-}
-
-int xc_domain_getvnuma(xc_interface *xch,
-                       uint32_t domid,
-                       uint32_t *nr_vnodes,
-                       uint32_t *nr_vmemranges,
-                       uint32_t *nr_vcpus,
-                       xen_vmemrange_t *vmemrange,
-                       unsigned int *vdistance,
-                       unsigned int *vcpu_to_vnode)
-{
-    int rc;
-    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * *nr_vmemranges,
-                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
-                             *nr_vnodes * *nr_vnodes,
-                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * *nr_vcpus,
-                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    struct xen_vnuma_topology_info vnuma_topo;
-
-    if ( xc_hypercall_bounce_pre(xch, vmemrange)      ||
-         xc_hypercall_bounce_pre(xch, vdistance)      ||
-         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) )
-    {
-        rc = -1;
-        errno = ENOMEM;
-        goto vnumaget_fail;
-    }
-
-    set_xen_guest_handle(vnuma_topo.vmemrange.h, vmemrange);
-    set_xen_guest_handle(vnuma_topo.vdistance.h, vdistance);
-    set_xen_guest_handle(vnuma_topo.vcpu_to_vnode.h, vcpu_to_vnode);
-
-    vnuma_topo.nr_vnodes = *nr_vnodes;
-    vnuma_topo.nr_vcpus = *nr_vcpus;
-    vnuma_topo.nr_vmemranges = *nr_vmemranges;
-    vnuma_topo.domid = domid;
-    vnuma_topo.pad = 0;
-
-    rc = do_memory_op(xch, XENMEM_get_vnumainfo, &vnuma_topo,
-                      sizeof(vnuma_topo));
-
-    *nr_vnodes = vnuma_topo.nr_vnodes;
-    *nr_vcpus = vnuma_topo.nr_vcpus;
-    *nr_vmemranges = vnuma_topo.nr_vmemranges;
-
- vnumaget_fail:
-    xc_hypercall_bounce_post(xch, vmemrange);
-    xc_hypercall_bounce_post(xch, vdistance);
-    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
-
-    return rc;
-}
-
-int xc_domain_soft_reset(xc_interface *xch,
-                         uint32_t domid)
-{
-    DECLARE_DOMCTL;
-    domctl.cmd = XEN_DOMCTL_soft_reset;
-    domctl.domain = domid;
-    return do_domctl(xch, &domctl);
-}
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_evtchn.c b/tools/libxc/xc_evtchn.c
deleted file mode 100644 (file)
index 614786d..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/******************************************************************************
- * xc_evtchn.c
- *
- * API for manipulating and accessing inter-domain event channels.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (c) 2004, K A Fraser.
- */
-
-#include "xc_private.h"
-
-static int do_evtchn_op(xc_interface *xch, int cmd, void *arg,
-                        size_t arg_size, int silently_fail)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BOUNCE(arg, arg_size, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( xc_hypercall_bounce_pre(xch, arg) )
-    {
-        PERROR("do_evtchn_op: bouncing arg failed");
-        goto out;
-    }
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_event_channel_op,
-                   cmd, HYPERCALL_BUFFER_AS_ARG(arg));
-    if ( ret < 0 && !silently_fail )
-        ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret);
-
-    xc_hypercall_bounce_post(xch, arg);
- out:
-    return ret;
-}
-
-xc_evtchn_port_or_error_t
-xc_evtchn_alloc_unbound(xc_interface *xch,
-                        uint32_t dom,
-                        uint32_t remote_dom)
-{
-    int rc;
-    struct evtchn_alloc_unbound arg = {
-        .dom        = dom,
-        .remote_dom = remote_dom,
-    };
-
-    rc = do_evtchn_op(xch, EVTCHNOP_alloc_unbound, &arg, sizeof(arg), 0);
-    if ( rc == 0 )
-        rc = arg.port;
-
-    return rc;
-}
-
-int xc_evtchn_reset(xc_interface *xch,
-                    uint32_t dom)
-{
-    struct evtchn_reset arg = { .dom = dom };
-    return do_evtchn_op(xch, EVTCHNOP_reset, &arg, sizeof(arg), 0);
-}
-
-int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status)
-{
-    return do_evtchn_op(xch, EVTCHNOP_status, status,
-                        sizeof(*status), 1);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
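
The control-plane event channel operations above are typically used by the toolstack when wiring up a new guest. A minimal sketch, assuming dom0 is the local end and domain 2 is a hypothetical remote guest:

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t guest = 2;                  /* hypothetical remote domain */
    xc_evtchn_port_or_error_t port;

    if ( !xch )
        return 1;

    /* Allocate an unbound port in dom0 that 'guest' may later bind to. */
    port = xc_evtchn_alloc_unbound(xch, 0, guest);
    if ( port < 0 )
        perror("xc_evtchn_alloc_unbound");
    else
        printf("allocated port %d\n", port);

    xc_interface_close(xch);
    return 0;
}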
diff --git a/tools/libxc/xc_evtchn_compat.c b/tools/libxc/xc_evtchn_compat.c
deleted file mode 100644 (file)
index 82baf14..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Compat shims for use of 3rd party consumers of libxenctrl xc_evtchn
- * functionality which has been split into separate libraries.
- */
-
-#include <xenevtchn.h>
-
-#define XC_WANT_COMPAT_EVTCHN_API
-#include "xenctrl.h"
-
-xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
-                          unsigned open_flags)
-{
-    return xenevtchn_open(logger, open_flags);
-}
-
-int xc_evtchn_close(xc_evtchn *xce)
-{
-    return xenevtchn_close(xce);
-}
-
-int xc_evtchn_fd(xc_evtchn *xce)
-{
-    return xenevtchn_fd(xce);
-}
-
-int xc_evtchn_notify(xc_evtchn *xce, evtchn_port_t port)
-{
-    return xenevtchn_notify(xce, port);
-}
-
-evtchn_port_or_error_t
-xc_evtchn_bind_unbound_port(xc_evtchn *xce, uint32_t domid)
-{
-    return xenevtchn_bind_unbound_port(xce, domid);
-}
-
-evtchn_port_or_error_t
-xc_evtchn_bind_interdomain(xc_evtchn *xce, uint32_t domid,
-                           evtchn_port_t remote_port)
-{
-    return xenevtchn_bind_interdomain(xce, domid, remote_port);
-}
-
-evtchn_port_or_error_t
-xc_evtchn_bind_virq(xc_evtchn *xce, unsigned int virq)
-{
-    return xenevtchn_bind_virq(xce, virq);
-}
-
-int xc_evtchn_unbind(xc_evtchn *xce, evtchn_port_t port)
-{
-    return xenevtchn_unbind(xce, port);
-}
-
-evtchn_port_or_error_t
-xc_evtchn_pending(xc_evtchn *xce)
-{
-    return xenevtchn_pending(xce);
-}
-
-int xc_evtchn_unmask(xc_evtchn *xce, evtchn_port_t port)
-{
-    return xenevtchn_unmask(xce, port);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
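
Third-party code that still uses the old names gets them by defining XC_WANT_COMPAT_EVTCHN_API before including xenctrl.h; the calls are simply forwarded to libxenevtchn as shown above. A minimal sketch (VIRQ_DOM_EXC is just an example VIRQ; new code should call xenevtchn directly):

#define XC_WANT_COMPAT_EVTCHN_API
#include <xenctrl.h>

int main(void)
{
    /* The compat handle is a xenevtchn handle underneath. */
    xc_evtchn *xce = xc_evtchn_open(NULL, 0);
    int port;

    if ( !xce )
        return 1;

    /* Bind to a VIRQ, then immediately unbind again. */
    port = xc_evtchn_bind_virq(xce, VIRQ_DOM_EXC);
    if ( port >= 0 )
        xc_evtchn_unbind(xce, port);

    xc_evtchn_close(xce);
    return 0;
}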
diff --git a/tools/libxc/xc_flask.c b/tools/libxc/xc_flask.c
deleted file mode 100644 (file)
index c1652ba..0000000
+++ /dev/null
@@ -1,450 +0,0 @@
-/******************************************************************************
- * xc_flask.c
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-#include <unistd.h>
-#include <stdio.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <sys/ioctl.h>
-
-#define OCON_ISID    0    /* initial SIDs */
-#define OCON_PIRQ    1    /* physical irqs */
-#define OCON_IOPORT  2    /* io ports */
-#define OCON_IOMEM   3    /* io memory */
-#define OCON_DEVICE  4    /* pci devices */
-#define INITCONTEXTLEN  256
-
-int xc_flask_op(xc_interface *xch, xen_flask_op_t *op)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BOUNCE(op, sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    op->interface_version = XEN_FLASK_INTERFACE_VERSION;
-
-    if ( xc_hypercall_bounce_pre(xch, op) )
-    {
-        PERROR("Could not bounce memory for flask op hypercall");
-        goto out;
-    }
-
-    ret = xencall1(xch->xcall, __HYPERVISOR_xsm_op,
-                   HYPERCALL_BUFFER_AS_ARG(op));
-    if ( ret < 0 )
-    {
-        if ( errno == EACCES )
-            fprintf(stderr, "XSM operation failed!\n");
-    }
-
-    xc_hypercall_bounce_post(xch, op);
-
- out:
-    return ret;
-}
-
-int xc_flask_load(xc_interface *xch, char *buf, uint32_t size)
-{
-    int err;
-    DECLARE_FLASK_OP;
-    DECLARE_HYPERCALL_BOUNCE(buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    if ( xc_hypercall_bounce_pre(xch, buf) )
-    {
-        PERROR("Could not bounce memory for flask op hypercall");
-        return -1;
-    }
-
-    op.cmd = FLASK_LOAD;
-    op.u.load.size = size;
-    set_xen_guest_handle(op.u.load.buffer, buf);
-    
-    err = xc_flask_op(xch, &op);
-
-    xc_hypercall_bounce_post(xch, buf);
-
-    return err;
-}
-
-int xc_flask_context_to_sid(xc_interface *xch, char *buf, uint32_t size, uint32_t *sid)
-{
-    int err;
-    DECLARE_FLASK_OP;
-    DECLARE_HYPERCALL_BOUNCE(buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, buf) )
-    {
-        PERROR("Could not bounce memory for flask op hypercall");
-        return -1;
-    }
-
-    op.cmd = FLASK_CONTEXT_TO_SID;
-    op.u.sid_context.size = size;
-    set_xen_guest_handle(op.u.sid_context.context, buf);
-    
-    err = xc_flask_op(xch, &op);
-
-    if ( !err )
-        *sid = op.u.sid_context.sid;
-
-    xc_hypercall_bounce_post(xch, buf);
-
-    return err;
-}
-
-int xc_flask_sid_to_context(xc_interface *xch, int sid, char *buf, uint32_t size)
-{
-    int err;
-    DECLARE_FLASK_OP;
-    DECLARE_HYPERCALL_BOUNCE(buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, buf) )
-    {
-        PERROR("Could not bounce memory for flask op hypercall");
-        return -1;
-    }
-
-    op.cmd = FLASK_SID_TO_CONTEXT;
-    op.u.sid_context.sid = sid;
-    op.u.sid_context.size = size;
-    set_xen_guest_handle(op.u.sid_context.context, buf);
-    
-    err = xc_flask_op(xch, &op);
-
-    xc_hypercall_bounce_post(xch, buf);
-   
-    return err;
-}
-
-int xc_flask_getenforce(xc_interface *xch)
-{
-    DECLARE_FLASK_OP;
-    op.cmd = FLASK_GETENFORCE;
-    
-    return xc_flask_op(xch, &op);
-}
-
-int xc_flask_setenforce(xc_interface *xch, int mode)
-{
-    DECLARE_FLASK_OP;
-    op.cmd = FLASK_SETENFORCE;
-    op.u.enforce.enforcing = mode;
-   
-    return xc_flask_op(xch, &op);
-}
-
-int xc_flask_getbool_byid(xc_interface *xch, int id, char *name, uint32_t size, int *curr, int *pend)
-{
-    int rv;
-    DECLARE_FLASK_OP;
-    DECLARE_HYPERCALL_BOUNCE(name, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, name) )
-    {
-        PERROR("Could not bounce memory for flask op hypercall");
-        return -1;
-    }
-
-    op.cmd = FLASK_GETBOOL;
-    op.u.boolean.bool_id = id;
-    op.u.boolean.size = size;
-    set_xen_guest_handle(op.u.boolean.name, name);
-
-    rv = xc_flask_op(xch, &op);
-
-    xc_hypercall_bounce_post(xch, name);
-
-    if ( rv )
-        return rv;
-    
-    if ( curr )
-        *curr = op.u.boolean.enforcing;
-    if ( pend )
-        *pend = op.u.boolean.pending;
-
-    return rv;
-}
-
-int xc_flask_getbool_byname(xc_interface *xch, char *name, int *curr, int *pend)
-{
-    int rv;
-    DECLARE_FLASK_OP;
-    DECLARE_HYPERCALL_BOUNCE(name, strlen(name), XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, name) )
-    {
-        PERROR("Could not bounce memory for flask op hypercall");
-        return -1;
-    }
-
-    op.cmd = FLASK_GETBOOL;
-    op.u.boolean.bool_id = -1;
-    op.u.boolean.size = strlen(name);
-    set_xen_guest_handle(op.u.boolean.name, name);
-
-    rv = xc_flask_op(xch, &op);
-
-    xc_hypercall_bounce_post(xch, name);
-
-    if ( rv )
-        return rv;
-    
-    if ( curr )
-        *curr = op.u.boolean.enforcing;
-    if ( pend )
-        *pend = op.u.boolean.pending;
-
-    return rv;
-}
-
-int xc_flask_setbool(xc_interface *xch, char *name, int value, int commit)
-{
-    int rv;
-    DECLARE_FLASK_OP;
-    DECLARE_HYPERCALL_BOUNCE(name, strlen(name), XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, name) )
-    {
-        PERROR("Could not bounce memory for flask op hypercall");
-        return -1;
-    }
-
-    op.cmd = FLASK_SETBOOL;
-    op.u.boolean.bool_id = -1;
-    op.u.boolean.new_value = value;
-    op.u.boolean.commit = 1;
-    op.u.boolean.size = strlen(name);
-    set_xen_guest_handle(op.u.boolean.name, name);
-
-    rv = xc_flask_op(xch, &op);
-
-    xc_hypercall_bounce_post(xch, name);
-
-    return rv;
-}
-
-
-static int xc_flask_add(xc_interface *xch, uint32_t ocon, uint64_t low, uint64_t high, char *scontext)
-{
-    uint32_t sid;
-    int err;
-    DECLARE_FLASK_OP;
-
-    err = xc_flask_context_to_sid(xch, scontext, strlen(scontext), &sid);
-    if ( err )
-        return err;
-
-    op.cmd = FLASK_ADD_OCONTEXT;
-    op.u.ocontext.ocon = ocon;
-    op.u.ocontext.sid = sid;
-    op.u.ocontext.low = low;
-    op.u.ocontext.high = high;
-    
-    return xc_flask_op(xch, &op);
-}
-
-int xc_flask_add_pirq(xc_interface *xch, unsigned int pirq, char *scontext)
-{
-    return xc_flask_add(xch, OCON_PIRQ, pirq, pirq, scontext);
-}
-
-int xc_flask_add_ioport(xc_interface *xch, unsigned long low, unsigned long high,
-                      char *scontext)
-{
-    return xc_flask_add(xch, OCON_IOPORT, low, high, scontext);
-}
-
-int xc_flask_add_iomem(xc_interface *xch, unsigned long low, unsigned long high,
-                     char *scontext)
-{
-    return xc_flask_add(xch, OCON_IOMEM, low, high, scontext);
-}
-
-int xc_flask_add_device(xc_interface *xch, unsigned long device, char *scontext)
-{
-    return xc_flask_add(xch, OCON_DEVICE, device, device, scontext);
-}
-
-static int xc_flask_del(xc_interface *xch, uint32_t ocon, uint64_t low, uint64_t high)
-{
-    DECLARE_FLASK_OP;
-
-    op.cmd = FLASK_DEL_OCONTEXT;
-    op.u.ocontext.ocon = ocon;
-    op.u.ocontext.low = low;
-    op.u.ocontext.high = high;
-    
-    return xc_flask_op(xch, &op);
-}
-
-int xc_flask_del_pirq(xc_interface *xch, unsigned int pirq)
-{
-    return xc_flask_del(xch, OCON_PIRQ, pirq, pirq);
-}
-
-int xc_flask_del_ioport(xc_interface *xch, unsigned long low, unsigned long high)
-{
-    return xc_flask_del(xch, OCON_IOPORT, low, high);
-}
-
-int xc_flask_del_iomem(xc_interface *xch, unsigned long low, unsigned long high)
-{
-    return xc_flask_del(xch, OCON_IOMEM, low, high);
-}
-
-int xc_flask_del_device(xc_interface *xch, unsigned long device)
-{
-    return xc_flask_del(xch, OCON_DEVICE, device, device);
-}
-
-int xc_flask_access(xc_interface *xch, const char *scon, const char *tcon,
-                uint16_t tclass, uint32_t req,
-                uint32_t *allowed, uint32_t *decided,
-                uint32_t *auditallow, uint32_t *auditdeny,
-                uint32_t *seqno)
-{
-    DECLARE_FLASK_OP;
-    int err;
-
-    err = xc_flask_context_to_sid(xch, (char*)scon, strlen(scon), &op.u.access.ssid);
-    if ( err )
-        return err;
-    err = xc_flask_context_to_sid(xch, (char*)tcon, strlen(tcon), &op.u.access.tsid);
-    if ( err )
-        return err;
-
-    op.cmd = FLASK_ACCESS;
-    op.u.access.tclass = tclass;
-    op.u.access.req = req;
-    
-    err = xc_flask_op(xch, &op);
-
-    if ( err )
-        return err;
-
-    if ( allowed )
-        *allowed = op.u.access.allowed;
-    if ( decided )
-        *decided = 0xffffffff;
-    if ( auditallow )
-        *auditallow = op.u.access.audit_allow;
-    if ( auditdeny )
-        *auditdeny = op.u.access.audit_deny;
-    if ( seqno )
-        *seqno = op.u.access.seqno;
-
-    if ( (op.u.access.allowed & req) != req )
-        err = -EPERM;
-
-    return err;
-}
-
-int xc_flask_avc_hashstats(xc_interface *xch, char *buf, int size)
-{
-    int err;
-    DECLARE_FLASK_OP;
-  
-    op.cmd = FLASK_AVC_HASHSTATS;
-  
-    err = xc_flask_op(xch, &op);
-
-    snprintf(buf, size,
-             "entries: %d\nbuckets used: %d/%d\nlongest chain: %d\n",
-             op.u.hash_stats.entries, op.u.hash_stats.buckets_used,
-             op.u.hash_stats.buckets_total, op.u.hash_stats.max_chain_len);
-
-    return err;
-}
-
-int xc_flask_avc_cachestats(xc_interface *xch, char *buf, int size)
-{
-    int err, n;
-    int i = 0;
-    DECLARE_FLASK_OP;
-
-    n = snprintf(buf, size, "lookups hits misses allocations reclaims frees\n");
-    buf += n;
-    size -= n;
-  
-    op.cmd = FLASK_AVC_CACHESTATS;
-    while ( size > 0 )
-    {
-        op.u.cache_stats.cpu = i;
-        err = xc_flask_op(xch, &op);
-        if ( err && errno == ENOENT )
-            return 0;
-        if ( err )
-            return err;
-        n = snprintf(buf, size, "%u %u %u %u %u %u\n",
-                     op.u.cache_stats.lookups, op.u.cache_stats.hits,
-                     op.u.cache_stats.misses, op.u.cache_stats.allocations,
-                     op.u.cache_stats.reclaims, op.u.cache_stats.frees);
-        buf += n;
-        size -= n;
-        i++;
-    }
-
-    return 0;
-}
-
-int xc_flask_policyvers(xc_interface *xch)
-{
-    DECLARE_FLASK_OP;
-    op.cmd = FLASK_POLICYVERS;
-
-    return xc_flask_op(xch, &op);
-}
-
-int xc_flask_getavc_threshold(xc_interface *xch)
-{
-    DECLARE_FLASK_OP;
-    op.cmd = FLASK_GETAVC_THRESHOLD;
-    
-    return xc_flask_op(xch, &op);
-}
-
-int xc_flask_setavc_threshold(xc_interface *xch, int threshold)
-{
-    DECLARE_FLASK_OP;
-    op.cmd = FLASK_SETAVC_THRESHOLD;
-    op.u.setavc_threshold.threshold = threshold;
-
-    return xc_flask_op(xch, &op);
-}
-
-int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid)
-{
-    DECLARE_FLASK_OP;
-    op.cmd = FLASK_RELABEL_DOMAIN;
-    op.u.relabel.domid = domid;
-    op.u.relabel.sid = sid;
-
-    return xc_flask_op(xch, &op);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
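
The FLASK calls above only do useful work on a hypervisor built with XSM/FLASK enabled. A minimal sketch that reads the enforcement mode and resolves a hypothetical security context string to a SID:

#include <stdio.h>
#include <string.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    /* Hypothetical XSM/FLASK security context for a guest. */
    char ctx[] = "system_u:system_r:domU_t";
    uint32_t sid;
    int mode;

    if ( !xch )
        return 1;

    mode = xc_flask_getenforce(xch);   /* 0 = permissive, 1 = enforcing */
    printf("FLASK enforcing: %d\n", mode);

    if ( !xc_flask_context_to_sid(xch, ctx, strlen(ctx), &sid) )
        printf("SID for %s is %u\n", ctx, sid);

    xc_interface_close(xch);
    return 0;
}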
diff --git a/tools/libxc/xc_foreign_memory.c b/tools/libxc/xc_foreign_memory.c
deleted file mode 100644 (file)
index 4053d26..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/******************************************************************************
- * xc_foreign_memory.c
- *
- * Functions for mapping foreign domain's memory.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define XC_BUILDING_COMPAT_MAP_FOREIGN_API
-#include "xc_private.h"
-
-void *xc_map_foreign_pages(xc_interface *xch, uint32_t dom, int prot,
-                           const xen_pfn_t *arr, int num)
-{
-    if (num < 0) {
-        errno = EINVAL;
-        return NULL;
-    }
-
-    return xenforeignmemory_map(xch->fmem, dom, prot, num, arr, NULL);
-}
-
-void *xc_map_foreign_range(xc_interface *xch,
-                           uint32_t dom, int size, int prot,
-                           unsigned long mfn)
-{
-    xen_pfn_t *arr;
-    int num;
-    int i;
-    void *ret;
-
-    num = (size + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
-    arr = calloc(num, sizeof(xen_pfn_t));
-    if ( arr == NULL )
-        return NULL;
-
-    for ( i = 0; i < num; i++ )
-        arr[i] = mfn + i;
-
-    ret = xc_map_foreign_pages(xch, dom, prot, arr, num);
-    free(arr);
-    return ret;
-}
-
-void *xc_map_foreign_ranges(xc_interface *xch,
-                            uint32_t dom, size_t size,
-                            int prot, size_t chunksize,
-                            privcmd_mmap_entry_t entries[],
-                            int nentries)
-{
-    xen_pfn_t *arr;
-    int num_per_entry;
-    int num;
-    int i;
-    int j;
-    void *ret;
-
-    num_per_entry = chunksize >> XC_PAGE_SHIFT;
-    num = num_per_entry * nentries;
-    arr = calloc(num, sizeof(xen_pfn_t));
-    if ( arr == NULL )
-        return NULL;
-
-    for ( i = 0; i < nentries; i++ )
-        for ( j = 0; j < num_per_entry; j++ )
-            arr[i * num_per_entry + j] = entries[i].mfn + j;
-
-    ret = xc_map_foreign_pages(xch, dom, prot, arr, num);
-    free(arr);
-    return ret;
-}
-
-void *xc_map_foreign_bulk(xc_interface *xch, uint32_t dom, int prot,
-                          const xen_pfn_t *arr, int *err, unsigned int num)
-{
-    return xenforeignmemory_map(xch->fmem, dom, prot, num, arr, err);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
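
These wrappers are thin compatibility shims over libxenforeignmemory. A minimal sketch mapping one hypothetical guest frame read-only; mappings obtained through this compat API are released with munmap():

#include <sys/mman.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t domid = 1;                 /* hypothetical guest */
    xen_pfn_t pfn = 0x1000;             /* hypothetical frame number */
    void *p;

    if ( !xch )
        return 1;

    /* Map a single foreign page read-only via xenforeignmemory. */
    p = xc_map_foreign_pages(xch, domid, PROT_READ, &pfn, 1);
    if ( p )
    {
        /* ... inspect the page contents here ... */
        munmap(p, XC_PAGE_SIZE);
    }

    xc_interface_close(xch);
    return 0;
}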
diff --git a/tools/libxc/xc_freebsd.c b/tools/libxc/xc_freebsd.c
deleted file mode 100644 (file)
index 9dd48a3..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/******************************************************************************
- *
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-/* Optionally flush file to disk and discard page cache */
-void discard_file_cache(xc_interface *xch, int fd, int flush)
-{
-    off_t cur = 0;
-    int saved_errno = errno;
-
-    if ( flush && (fsync(fd) < 0) )
-        goto out;
-
-    /*
-     * Calculate last page boundary of amount written so far
-     * unless we are flushing in which case entire cache
-     * is discarded.
-     */
-    if ( !flush )
-    {
-        if ( (cur = lseek(fd, 0, SEEK_CUR)) == (off_t)-1 )
-            cur = 0;
-        cur &= ~(XC_PAGE_SIZE-1);
-    }
-
-    /* Discard from the buffer cache. */
-    if ( posix_fadvise(fd, 0, cur, POSIX_FADV_DONTNEED) < 0 )
-        goto out;
-
- out:
-    errno = saved_errno;
-}
-
-void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
-{
-    int ret;
-    void *ptr;
-
-    ret = posix_memalign(&ptr, alignment, size);
-    if ( ret != 0 || !ptr )
-        return NULL;
-
-    return ptr;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_gnttab.c b/tools/libxc/xc_gnttab.c
deleted file mode 100644 (file)
index eb92d89..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/******************************************************************************
- *
- * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-int xc_gnttab_op(xc_interface *xch, int cmd, void * op, int op_size, int count)
-{
-    int ret = 0;
-    DECLARE_HYPERCALL_BOUNCE(op, count * op_size, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( xc_hypercall_bounce_pre(xch, op) )
-    {
-        PERROR("Could not bounce buffer for grant table op hypercall");
-        goto out1;
-    }
-
-    ret = xencall3(xch->xcall,  __HYPERVISOR_grant_table_op,
-                   cmd, HYPERCALL_BUFFER_AS_ARG(op), count);
-
-    xc_hypercall_bounce_post(xch, op);
-
- out1:
-    return ret;
-}
-
-int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query)
-{
-    int rc;
-
-    rc = xc_gnttab_op(xch, GNTTABOP_query_size, query, sizeof(*query), 1);
-
-    if ( rc || (query->status != GNTST_okay) )
-        ERROR("Could not query dom %u's grant size\n", query->dom);
-
-    return rc;
-}
-
-int xc_gnttab_get_version(xc_interface *xch, uint32_t domid)
-{
-    struct gnttab_get_version query;
-    int rc;
-
-    query.dom = domid;
-    rc = xc_gnttab_op(xch, GNTTABOP_get_version, &query, sizeof(query),
-                      1);
-    if ( rc < 0 )
-        return rc;
-    else
-        return query.version;
-}
-
-static void *_gnttab_map_table(xc_interface *xch, uint32_t domid, int *gnt_num)
-{
-    int rc, i;
-    struct gnttab_query_size query;
-    struct gnttab_setup_table setup;
-    DECLARE_HYPERCALL_BUFFER(unsigned long, frame_list);
-    xen_pfn_t *pfn_list = NULL;
-    grant_entry_v1_t *gnt = NULL;
-
-    if ( !gnt_num )
-        return NULL;
-
-    query.dom = domid;
-    rc = xc_gnttab_op(xch, GNTTABOP_query_size, &query, sizeof(query), 1);
-
-    if ( rc || (query.status != GNTST_okay) )
-    {
-        ERROR("Could not query dom%d's grant size\n", domid);
-        return NULL;
-    }
-
-    *gnt_num = query.nr_frames * (PAGE_SIZE / sizeof(grant_entry_v1_t) );
-
-    frame_list = xc_hypercall_buffer_alloc(xch, frame_list, query.nr_frames * sizeof(unsigned long));
-    if ( !frame_list )
-    {
-        ERROR("Could not allocate frame_list in xc_gnttab_map_table\n");
-        return NULL;
-    }
-
-    pfn_list = malloc(query.nr_frames * sizeof(xen_pfn_t));
-    if ( !pfn_list )
-    {
-        ERROR("Could not allocate pfn_list in xc_gnttab_map_table\n");
-        goto err;
-    }
-
-    setup.dom = domid;
-    setup.nr_frames = query.nr_frames;
-    set_xen_guest_handle(setup.frame_list, frame_list);
-
-    /* XXX Any race with other setup_table hypercall? */
-    rc = xc_gnttab_op(xch, GNTTABOP_setup_table, &setup, sizeof(setup),
-                      1);
-
-    if ( rc || (setup.status != GNTST_okay) )
-    {
-        ERROR("Could not get grant table frame list\n");
-        goto err;
-    }
-
-    for ( i = 0; i < setup.nr_frames; i++ )
-        pfn_list[i] = frame_list[i];
-
-    gnt = xc_map_foreign_pages(xch, domid, PROT_READ, pfn_list,
-                               setup.nr_frames);
-    if ( !gnt )
-    {
-        ERROR("Could not map grant table\n");
-        goto err;
-    }
-
-err:
-    if ( frame_list )
-        xc_hypercall_buffer_free(xch, frame_list);
-    free(pfn_list);
-
-    return gnt;
-}
-
-grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid,
-                                         int *gnt_num)
-{
-    if (xc_gnttab_get_version(xch, domid) == 2)
-        return NULL;
-    return _gnttab_map_table(xch, domid, gnt_num);
-}
-
-grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid,
-                                         int *gnt_num)
-{
-    if (xc_gnttab_get_version(xch, domid) != 2)
-        return NULL;
-    return _gnttab_map_table(xch, domid, gnt_num);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
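
A minimal sketch of the query side of this interface, assuming a hypothetical domain 1; struct gnttab_query_size and GNTST_okay come from the public xen/grant_table.h header pulled in by xenctrl.h:

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    struct gnttab_query_size q = { .dom = 1 };   /* hypothetical domain */

    if ( !xch )
        return 1;

    /* How many grant table frames does the domain use, and how many may it? */
    if ( !xc_gnttab_query_size(xch, &q) && q.status == GNTST_okay )
        printf("grant frames in use: %u of %u\n", q.nr_frames, q.max_nr_frames);

    printf("grant table version: %d\n", xc_gnttab_get_version(xch, 1));

    xc_interface_close(xch);
    return 0;
}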
diff --git a/tools/libxc/xc_gnttab_compat.c b/tools/libxc/xc_gnttab_compat.c
deleted file mode 100644 (file)
index 6f036d8..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Compat shims for use of 3rd party consumers of libxenctrl xc_gnt{tab,shr}
- * functionality which has been split into separate libraries.
- */
-
-#include <xengnttab.h>
-
-#define XC_WANT_COMPAT_GNTTAB_API
-#include "xenctrl.h"
-
-xc_gnttab *xc_gnttab_open(xentoollog_logger *logger,
-                          unsigned open_flags)
-{
-    return xengnttab_open(logger, open_flags);
-}
-
-int xc_gnttab_close(xc_gnttab *xcg)
-{
-    return xengnttab_close(xcg);
-}
-
-void *xc_gnttab_map_grant_ref(xc_gnttab *xcg,
-                              uint32_t domid,
-                              uint32_t ref,
-                              int prot)
-{
-    return xengnttab_map_grant_ref(xcg, domid, ref, prot);
-}
-
-void *xc_gnttab_map_grant_refs(xc_gnttab *xcg,
-                               uint32_t count,
-                               uint32_t *domids,
-                               uint32_t *refs,
-                               int prot)
-{
-    return xengnttab_map_grant_refs(xcg, count, domids, refs, prot);
-}
-
-void *xc_gnttab_map_domain_grant_refs(xc_gnttab *xcg,
-                                      uint32_t count,
-                                      uint32_t domid,
-                                      uint32_t *refs,
-                                      int prot)
-{
-    return xengnttab_map_domain_grant_refs(xcg, count, domid, refs, prot);
-}
-
-void *xc_gnttab_map_grant_ref_notify(xc_gnttab *xcg,
-                                     uint32_t domid,
-                                     uint32_t ref,
-                                     int prot,
-                                     uint32_t notify_offset,
-                                     evtchn_port_t notify_port)
-{
-    return xengnttab_map_grant_ref_notify(xcg, domid, ref, prot,
-                                          notify_offset, notify_port);
-}
-
-int xc_gnttab_munmap(xc_gnttab *xcg,
-                     void *start_address,
-                     uint32_t count)
-{
-    return xengnttab_unmap(xcg, start_address, count);
-}
-
-int xc_gnttab_set_max_grants(xc_gnttab *xcg,
-                             uint32_t count)
-{
-    return xengnttab_set_max_grants(xcg, count);
-}
-
-xc_gntshr *xc_gntshr_open(xentoollog_logger *logger,
-                          unsigned open_flags)
-{
-    return xengntshr_open(logger, open_flags);
-}
-
-int xc_gntshr_close(xc_gntshr *xcg)
-{
-    return xengntshr_close(xcg);
-}
-
-void *xc_gntshr_share_pages(xc_gntshr *xcg, uint32_t domid,
-                            int count, uint32_t *refs, int writable)
-{
-    return xengntshr_share_pages(xcg, domid, count, refs, writable);
-}
-
-void *xc_gntshr_share_page_notify(xc_gntshr *xcg, uint32_t domid,
-                                  uint32_t *ref, int writable,
-                                  uint32_t notify_offset,
-                                  evtchn_port_t notify_port)
-{
-    return xengntshr_share_page_notify(xcg, domid, ref, writable,
-                                       notify_offset, notify_port);
-}
-
-int xc_gntshr_munmap(xc_gntshr *xcg, void *start_address, uint32_t count)
-{
-    return xengntshr_unshare(xcg, start_address, count);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
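
As with the event channel shims, defining XC_WANT_COMPAT_GNTTAB_API before including xenctrl.h keeps old consumers building; the work is done by libxengnttab. A minimal sketch with a hypothetical domain id and grant reference:

#define XC_WANT_COMPAT_GNTTAB_API
#include <sys/mman.h>
#include <xenctrl.h>

int main(void)
{
    /* Compat handle; internally this is a xengnttab handle. */
    xc_gnttab *xcg = xc_gnttab_open(NULL, 0);
    uint32_t domid = 1, ref = 8;     /* hypothetical domain and grant ref */
    void *page;

    if ( !xcg )
        return 1;

    /* Map the granted page read/write, then unmap it again. */
    page = xc_gnttab_map_grant_ref(xcg, domid, ref, PROT_READ | PROT_WRITE);
    if ( page )
        xc_gnttab_munmap(xcg, page, 1);

    xc_gnttab_close(xcg);
    return 0;
}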
diff --git a/tools/libxc/xc_hcall_buf.c b/tools/libxc/xc_hcall_buf.c
deleted file mode 100644 (file)
index 200671f..0000000
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2010, Citrix Systems, Inc.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "xc_private.h"
-
-xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL) = {
-    .hbuf = NULL,
-    .param_shadow = NULL,
-    HYPERCALL_BUFFER_INIT_NO_BOUNCE
-};
-
-void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages)
-{
-    void *p = xencall_alloc_buffer_pages(xch->xcall, nr_pages);
-
-    if (!p)
-        return NULL;
-
-    b->hbuf = p;
-
-    return b->hbuf;
-}
-
-void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages)
-{
-    xencall_free_buffer_pages(xch->xcall, b->hbuf, nr_pages);
-}
-
-void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size)
-{
-    void *p = xencall_alloc_buffer(xch->xcall, size);
-
-    if (!p)
-        return NULL;
-
-    b->hbuf = p;
-
-    return b->hbuf;
-}
-
-void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b)
-{
-    xencall_free_buffer(xch->xcall, b->hbuf);
-}
-
-int xc__hypercall_bounce_pre(xc_interface *xch, xc_hypercall_buffer_t *b)
-{
-    void *p;
-
-    /*
-     * Catch hypercall buffer declared other than with DECLARE_HYPERCALL_BOUNCE.
-     */
-    if ( b->ubuf == (void *)-1 || b->dir == XC_HYPERCALL_BUFFER_BOUNCE_NONE )
-        abort();
-
-    /*
-     * Don't need to bounce a NULL buffer.
-     */
-    if ( b->ubuf == NULL )
-    {
-        b->hbuf = NULL;
-        return 0;
-    }
-
-    p = xc__hypercall_buffer_alloc(xch, b, b->sz);
-    if ( p == NULL )
-        return -1;
-
-    if ( b->dir == XC_HYPERCALL_BUFFER_BOUNCE_IN || b->dir == XC_HYPERCALL_BUFFER_BOUNCE_BOTH )
-        memcpy(b->hbuf, b->ubuf, b->sz);
-
-    return 0;
-}
-
-void xc__hypercall_bounce_post(xc_interface *xch, xc_hypercall_buffer_t *b)
-{
-    /*
-     * Catch hypercall buffer declared other than with DECLARE_HYPERCALL_BOUNCE.
-     */
-    if ( b->ubuf == (void *)-1 || b->dir == XC_HYPERCALL_BUFFER_BOUNCE_NONE )
-        abort();
-
-    if ( b->hbuf == NULL )
-        return;
-
-    if ( b->dir == XC_HYPERCALL_BUFFER_BOUNCE_OUT || b->dir == XC_HYPERCALL_BUFFER_BOUNCE_BOTH )
-        memcpy(b->ubuf, b->hbuf, b->sz);
-
-    xc__hypercall_buffer_free(xch, b);
-}
-
-struct xc_hypercall_buffer_array {
-    unsigned max_bufs;
-    xc_hypercall_buffer_t *bufs;
-};
-
-xc_hypercall_buffer_array_t *xc_hypercall_buffer_array_create(xc_interface *xch,
-                                                              unsigned n)
-{
-    xc_hypercall_buffer_array_t *array;
-    xc_hypercall_buffer_t *bufs = NULL;
-
-    array = malloc(sizeof(*array));
-    if ( array == NULL )
-        goto error;
-
-    bufs = calloc(n, sizeof(*bufs));
-    if ( bufs == NULL )
-        goto error;
-
-    array->max_bufs = n;
-    array->bufs     = bufs;
-
-    return array;
-
-error:
-    free(bufs);
-    free(array);
-    return NULL;
-}
-
-void *xc__hypercall_buffer_array_alloc(xc_interface *xch,
-                                       xc_hypercall_buffer_array_t *array,
-                                       unsigned index,
-                                       xc_hypercall_buffer_t *hbuf,
-                                       size_t size)
-{
-    void *buf;
-
-    if ( index >= array->max_bufs || array->bufs[index].hbuf )
-        abort();
-
-    buf = xc__hypercall_buffer_alloc(xch, hbuf, size);
-    if ( buf )
-        array->bufs[index] = *hbuf;
-    return buf;
-}
-
-void *xc__hypercall_buffer_array_get(xc_interface *xch,
-                                     xc_hypercall_buffer_array_t *array,
-                                     unsigned index,
-                                     xc_hypercall_buffer_t *hbuf)
-{
-    if ( index >= array->max_bufs || array->bufs[index].hbuf == NULL )
-        abort();
-
-    *hbuf = array->bufs[index];
-    return array->bufs[index].hbuf;
-}
-
-void xc_hypercall_buffer_array_destroy(xc_interface *xc,
-                                       xc_hypercall_buffer_array_t *array)
-{
-    unsigned i;
-
-    if ( array == NULL )
-        return;
-
-    for (i = 0; i < array->max_bufs; i++ )
-        xc__hypercall_buffer_free(xc, &array->bufs[i]);
-    free(array->bufs);
-    free(array);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
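
This file provides the bounce-buffer machinery used by nearly every wrapper in the library. The usual calling pattern mirrors do_evtchn_op() earlier in this patch; the sketch below is a hypothetical wrapper (xen_foo_t and __HYPERVISOR_foo_op are placeholders) that would live inside the library itself, since the bounce macros are declared in xc_private.h:

/* Sketch of how library-internal code drives the machinery above;
 * xen_foo_t and __HYPERVISOR_foo_op are hypothetical placeholders. */
static int xc_foo_query(xc_interface *xch, xen_foo_t *arg)
{
    int rc;
    /* Describe a caller buffer that must be copied in and back out. */
    DECLARE_HYPERCALL_BOUNCE(arg, sizeof(*arg), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    /* Allocate hypercall-safe memory and copy the caller's data in. */
    if ( xc_hypercall_bounce_pre(xch, arg) )
        return -1;

    rc = xencall2(xch->xcall, __HYPERVISOR_foo_op,
                  0, HYPERCALL_BUFFER_AS_ARG(arg));

    /* Copy results back to the caller and free the bounce buffer. */
    xc_hypercall_bounce_post(xch, arg);

    return rc;
}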
diff --git a/tools/libxc/xc_kexec.c b/tools/libxc/xc_kexec.c
deleted file mode 100644 (file)
index a4e8966..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-/******************************************************************************
- * xc_kexec.c
- *
- * API for loading and executing kexec images.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * Copyright (C) 2013 Citrix Systems R&D Ltd.
- */
-#include "xc_private.h"
-
-int xc_kexec_exec(xc_interface *xch, int type)
-{
-    DECLARE_HYPERCALL_BUFFER(xen_kexec_exec_t, exec);
-    int ret = -1;
-
-    exec = xc_hypercall_buffer_alloc(xch, exec, sizeof(*exec));
-    if ( exec == NULL )
-    {
-        PERROR("Could not alloc bounce buffer for kexec_exec hypercall");
-        goto out;
-    }
-
-    exec->type = type;
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
-                   KEXEC_CMD_kexec,
-                   HYPERCALL_BUFFER_AS_ARG(exec));
-
-out:
-    xc_hypercall_buffer_free(xch, exec);
-
-    return ret;
-}
-
-int xc_kexec_get_range(xc_interface *xch, int range,  int nr,
-                       uint64_t *size, uint64_t *start)
-{
-    DECLARE_HYPERCALL_BUFFER(xen_kexec_range_t, get_range);
-    int ret = -1;
-
-    get_range = xc_hypercall_buffer_alloc(xch, get_range, sizeof(*get_range));
-    if ( get_range == NULL )
-    {
-        PERROR("Could not alloc bounce buffer for kexec_get_range hypercall");
-        goto out;
-    }
-
-    get_range->range = range;
-    get_range->nr = nr;
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
-                   KEXEC_CMD_kexec_get_range,
-                   HYPERCALL_BUFFER_AS_ARG(get_range));
-
-    *size = get_range->size;
-    *start = get_range->start;
-
-out:
-    xc_hypercall_buffer_free(xch, get_range);
-
-    return ret;
-}
-
-int xc_kexec_load(xc_interface *xch, uint8_t type, uint16_t arch,
-                  uint64_t entry_maddr,
-                  uint32_t nr_segments, xen_kexec_segment_t *segments)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BOUNCE(segments, sizeof(*segments) * nr_segments,
-                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    DECLARE_HYPERCALL_BUFFER(xen_kexec_load_t, load);
-
-    if ( xc_hypercall_bounce_pre(xch, segments) )
-    {
-        PERROR("Could not allocate bounce buffer for kexec load hypercall");
-        goto out;
-    }
-    load = xc_hypercall_buffer_alloc(xch, load, sizeof(*load));
-    if ( load == NULL )
-    {
-        PERROR("Could not allocate buffer for kexec load hypercall");
-        goto out;
-    }
-
-    load->type = type;
-    load->arch = arch;
-    load->entry_maddr = entry_maddr;
-    load->nr_segments = nr_segments;
-    set_xen_guest_handle(load->segments.h, segments);
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
-                   KEXEC_CMD_kexec_load,
-                   HYPERCALL_BUFFER_AS_ARG(load));
-
-out:
-    xc_hypercall_buffer_free(xch, load);
-    xc_hypercall_bounce_post(xch, segments);
-
-    return ret;
-}
-
-int xc_kexec_unload(xc_interface *xch, int type)
-{
-    DECLARE_HYPERCALL_BUFFER(xen_kexec_unload_t, unload);
-    int ret = -1;
-
-    unload = xc_hypercall_buffer_alloc(xch, unload, sizeof(*unload));
-    if ( unload == NULL )
-    {
-        PERROR("Could not alloc buffer for kexec unload hypercall");
-        goto out;
-    }
-
-    unload->type = type;
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
-                   KEXEC_CMD_kexec_unload,
-                   HYPERCALL_BUFFER_AS_ARG(unload));
-
-out:
-    xc_hypercall_buffer_free(xch, unload);
-
-    return ret;
-}
-
-int xc_kexec_status(xc_interface *xch, int type)
-{
-    DECLARE_HYPERCALL_BUFFER(xen_kexec_status_t, status);
-    int ret = -1;
-
-    status = xc_hypercall_buffer_alloc(xch, status, sizeof(*status));
-    if ( status == NULL )
-    {
-        PERROR("Could not alloc buffer for kexec status hypercall");
-        goto out;
-    }
-
-    status->type = type;
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_kexec_op,
-                   KEXEC_CMD_kexec_status,
-                   HYPERCALL_BUFFER_AS_ARG(status));
-
-out:
-    xc_hypercall_buffer_free(xch, status);
-
-    return ret;
-}
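
A minimal sketch of the read-only kexec queries, for instance from a crash-dump tool; KEXEC_TYPE_CRASH and KEXEC_RANGE_MA_CRASH come from the public xen/kexec.h header, assumed here to be pulled in via xenctrl.h:

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint64_t start = 0, size = 0;

    if ( !xch )
        return 1;

    /* Non-zero result indicates a crash image is currently loaded. */
    printf("crash image loaded: %d\n", xc_kexec_status(xch, KEXEC_TYPE_CRASH));

    /* Query the machine address range reserved for the crash kernel. */
    if ( !xc_kexec_get_range(xch, KEXEC_RANGE_MA_CRASH, 0, &size, &start) )
        printf("crash area: 0x%llx + 0x%llx\n",
               (unsigned long long)start, (unsigned long long)size);

    xc_interface_close(xch);
    return 0;
}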
diff --git a/tools/libxc/xc_linux.c b/tools/libxc/xc_linux.c
deleted file mode 100644 (file)
index c67c71c..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/******************************************************************************
- *
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-/* Optionally flush file to disk and discard page cache */
-void discard_file_cache(xc_interface *xch, int fd, int flush) 
-{
-    off_t cur = 0;
-    int saved_errno = errno;
-
-    if ( flush && (fsync(fd) < 0) )
-    {
-        /*PERROR("Failed to flush file: %s", strerror(errno));*/
-        goto out;
-    }
-
-    /* 
-     * Calculate last page boundary of amount written so far 
-     * unless we are flushing in which case entire cache
-     * is discarded.
-     */
-    if ( !flush )
-    {
-        if ( (cur = lseek(fd, 0, SEEK_CUR)) == (off_t)-1 )
-            cur = 0;
-        cur &= ~(XC_PAGE_SIZE-1);
-    }
-
-    /* Discard from the buffer cache. */
-    if ( posix_fadvise64(fd, 0, cur, POSIX_FADV_DONTNEED) < 0 )
-    {
-        /*PERROR("Failed to discard cache: %s", strerror(errno));*/
-        goto out;
-    }
-
- out:
-    errno = saved_errno;
-}
-
-void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
-{
-    int ret;
-    void *ptr;
-
-    ret = posix_memalign(&ptr, alignment, size);
-    if (ret != 0 || !ptr)
-        return NULL;
-
-    return ptr;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
deleted file mode 100644 (file)
index b452460..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/******************************************************************************
- *
- * tools/libxc/xc_mem_access.c
- *
- * Interface to low-level memory access mode functionality
- *
- * Copyright (c) 2011 Virtuata, Inc.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-#include <xen/memory.h>
-
-int xc_set_mem_access(xc_interface *xch,
-                      uint32_t domain_id,
-                      xenmem_access_t access,
-                      uint64_t first_pfn,
-                      uint32_t nr)
-{
-    xen_mem_access_op_t mao =
-    {
-        .op     = XENMEM_access_op_set_access,
-        .domid  = domain_id,
-        .access = access,
-        .pfn    = first_pfn,
-        .nr     = nr
-    };
-
-    return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
-}
-
-int xc_set_mem_access_multi(xc_interface *xch,
-                            uint32_t domain_id,
-                            uint8_t *access,
-                            uint64_t *pages,
-                            uint32_t nr)
-{
-    DECLARE_HYPERCALL_BOUNCE(access, nr, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(uint64_t),
-                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    int rc;
-
-    xen_mem_access_op_t mao =
-    {
-        .op       = XENMEM_access_op_set_access_multi,
-        .domid    = domain_id,
-        .access   = XENMEM_access_default + 1, /* Invalid value */
-        .pfn      = ~0UL, /* Invalid GFN */
-        .nr       = nr,
-    };
-
-    if ( xc_hypercall_bounce_pre(xch, pages) ||
-         xc_hypercall_bounce_pre(xch, access) )
-    {
-        PERROR("Could not bounce memory for XENMEM_access_op_set_access_multi");
-        return -1;
-    }
-
-    set_xen_guest_handle(mao.pfn_list, pages);
-    set_xen_guest_handle(mao.access_list, access);
-
-    rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
-
-    xc_hypercall_bounce_post(xch, access);
-    xc_hypercall_bounce_post(xch, pages);
-
-    return rc;
-}
-
-int xc_get_mem_access(xc_interface *xch,
-                      uint32_t domain_id,
-                      uint64_t pfn,
-                      xenmem_access_t *access)
-{
-    int rc;
-    xen_mem_access_op_t mao =
-    {
-        .op    = XENMEM_access_op_get_access,
-        .domid = domain_id,
-        .pfn   = pfn
-    };
-
-    rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
-
-    if ( rc == 0 )
-        *access = mao.access;
-
-    return rc;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End: 
- */
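
These calls back the vm_event/introspection machinery. A minimal sketch restricting a hypothetical range of guest frames to read-only and reading the setting back:

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t domid = 1;              /* hypothetical introspected guest */
    xenmem_access_t access;

    if ( !xch )
        return 1;

    /* Make 16 pages starting at gfn 0x1000 read-only (no write/execute). */
    if ( xc_set_mem_access(xch, domid, XENMEM_access_r, 0x1000, 16) )
        perror("xc_set_mem_access");

    /* Read back the restriction on the first page. */
    if ( !xc_get_mem_access(xch, domid, 0x1000, &access) )
        printf("gfn 0x1000 access type: %d\n", (int)access);

    xc_interface_close(xch);
    return 0;
}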
diff --git a/tools/libxc/xc_mem_paging.c b/tools/libxc/xc_mem_paging.c
deleted file mode 100644 (file)
index 738f63a..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/******************************************************************************
- *
- * tools/libxc/xc_mem_paging.c
- *
- * Interface to low-level memory paging functionality.
- *
- * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-static int xc_mem_paging_memop(xc_interface *xch, uint32_t domain_id,
-                               unsigned int op, uint64_t gfn, void *buffer)
-{
-    xen_mem_paging_op_t mpo;
-    DECLARE_HYPERCALL_BOUNCE(buffer, XC_PAGE_SIZE,
-                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    int rc;
-
-    memset(&mpo, 0, sizeof(mpo));
-
-    mpo.op      = op;
-    mpo.domain  = domain_id;
-    mpo.gfn     = gfn;
-
-    if ( buffer )
-    {
-        if ( xc_hypercall_bounce_pre(xch, buffer) )
-        {
-            PERROR("Could not bounce memory for XENMEM_paging_op %u", op);
-            return -1;
-        }
-
-        set_xen_guest_handle(mpo.buffer, buffer);
-    }
-
-    rc = do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
-
-    if ( buffer )
-        xc_hypercall_bounce_post(xch, buffer);
-
-    return rc;
-}
-
-int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
-                         uint32_t *port)
-{
-    if ( !port )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    return xc_vm_event_control(xch, domain_id,
-                               XEN_VM_EVENT_ENABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               port);
-}
-
-int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id)
-{
-    return xc_vm_event_control(xch, domain_id,
-                               XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               NULL);
-}
-
-int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id)
-{
-    return xc_vm_event_control(xch, domain_id,
-                               XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
-                               NULL);
-}
-
-int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
-{
-    return xc_mem_paging_memop(xch, domain_id,
-                               XENMEM_paging_op_nominate,
-                               gfn, NULL);
-}
-
-int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
-{
-    return xc_mem_paging_memop(xch, domain_id,
-                               XENMEM_paging_op_evict,
-                               gfn, NULL);
-}
-
-int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
-{
-    return xc_mem_paging_memop(xch, domain_id,
-                               XENMEM_paging_op_prep,
-                               gfn, NULL);
-}
-
-int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
-                       uint64_t gfn, void *buffer)
-{
-    errno = EINVAL;
-
-    if ( !buffer )
-        return -1;
-
-    return xc_mem_paging_memop(xch, domain_id, XENMEM_paging_op_prep,
-                               gfn, buffer);
-}
-
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End: 
- */
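
A minimal sketch of driving the paging interface for a hypothetical guest: enable the ring, nominate and evict one frame, then tear down again. A real pager would also map the vm_event ring and service requests on 'port':

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t domid = 1;        /* hypothetical guest being paged */
    uint32_t port = 0;
    uint64_t gfn = 0x2000;     /* hypothetical frame to page out */

    if ( !xch )
        return 1;

    /* Enable the paging ring; 'port' receives the vm_event channel. */
    if ( xc_mem_paging_enable(xch, domid, &port) )
        perror("xc_mem_paging_enable");
    else if ( !xc_mem_paging_nominate(xch, domid, gfn) &&
              !xc_mem_paging_evict(xch, domid, gfn) )
        printf("gfn %#lx evicted, event port %u\n", (unsigned long)gfn, port);

    xc_mem_paging_disable(xch, domid);
    xc_interface_close(xch);
    return 0;
}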
diff --git a/tools/libxc/xc_memshr.c b/tools/libxc/xc_memshr.c
deleted file mode 100644 (file)
index a6cfd7d..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/******************************************************************************
- *
- * xc_memshr.c
- *
- * Interface to low-level memory sharing functionality.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Grzegorz Milos)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-#include <xen/memory.h>
-#include <xen/grant_table.h>
-
-int xc_memshr_control(xc_interface *xch,
-                      uint32_t domid,
-                      int enable)
-{
-    DECLARE_DOMCTL;
-    struct xen_domctl_mem_sharing_op *op;
-
-    domctl.cmd = XEN_DOMCTL_mem_sharing_op;
-    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
-    domctl.domain = domid;
-    op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_CONTROL;
-    op->u.enable = enable;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_memshr_ring_enable(xc_interface *xch,
-                          uint32_t domid,
-                          uint32_t *port)
-{
-    if ( !port )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_ENABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               port);
-}
-
-int xc_memshr_ring_disable(xc_interface *xch,
-                           uint32_t domid)
-{
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               NULL);
-}
-
-static int xc_memshr_memop(xc_interface *xch, uint32_t domid,
-                            xen_mem_sharing_op_t *mso)
-{
-    mso->domain = domid;
-
-    return do_memory_op(xch, XENMEM_sharing_op, mso, sizeof(*mso));
-}
-
-int xc_memshr_nominate_gfn(xc_interface *xch,
-                           uint32_t domid,
-                           unsigned long gfn,
-                           uint64_t *handle)
-{
-    int rc;
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_nominate_gfn;
-    mso.u.nominate.u.gfn = gfn;
-
-    rc = xc_memshr_memop(xch, domid, &mso);
-
-    if ( !rc )
-        *handle = mso.u.nominate.handle;
-
-    return rc;
-}
-
-int xc_memshr_nominate_gref(xc_interface *xch,
-                            uint32_t domid,
-                            grant_ref_t gref,
-                            uint64_t *handle)
-{
-    int rc;
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_nominate_gref;
-    mso.u.nominate.u.grant_ref = gref;
-
-    rc = xc_memshr_memop(xch, domid, &mso);
-
-    if ( !rc )
-        *handle = mso.u.nominate.handle;
-
-    return rc;
-}
-
-int xc_memshr_share_gfns(xc_interface *xch,
-                         uint32_t source_domain,
-                         unsigned long source_gfn,
-                         uint64_t source_handle,
-                         uint32_t client_domain,
-                         unsigned long client_gfn,
-                         uint64_t client_handle)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_share;
-
-    mso.u.share.source_handle = source_handle;
-    mso.u.share.source_gfn    = source_gfn;
-    mso.u.share.client_domain = client_domain;
-    mso.u.share.client_gfn    = client_gfn;
-    mso.u.share.client_handle = client_handle;
-
-    return xc_memshr_memop(xch, source_domain, &mso);
-}
-
-int xc_memshr_share_grefs(xc_interface *xch,
-                          uint32_t source_domain,
-                          grant_ref_t source_gref,
-                          uint64_t source_handle,
-                          uint32_t client_domain,
-                          grant_ref_t client_gref,
-                          uint64_t client_handle)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_share;
-
-    mso.u.share.source_handle = source_handle;
-    XENMEM_SHARING_OP_FIELD_MAKE_GREF(mso.u.share.source_gfn, source_gref);
-    mso.u.share.client_domain = client_domain;
-    XENMEM_SHARING_OP_FIELD_MAKE_GREF(mso.u.share.client_gfn, client_gref);
-    mso.u.share.client_handle = client_handle;
-
-    return xc_memshr_memop(xch, source_domain, &mso);
-}
-
-int xc_memshr_add_to_physmap(xc_interface *xch,
-                    uint32_t source_domain,
-                    unsigned long source_gfn,
-                    uint64_t source_handle,
-                    uint32_t client_domain,
-                    unsigned long client_gfn)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_add_physmap;
-
-    mso.u.share.source_handle = source_handle;
-    mso.u.share.source_gfn    = source_gfn;
-    mso.u.share.client_domain = client_domain;
-    mso.u.share.client_gfn    = client_gfn;
-
-    return xc_memshr_memop(xch, source_domain, &mso);
-}
-
-int xc_memshr_range_share(xc_interface *xch,
-                          uint32_t source_domain,
-                          uint32_t client_domain,
-                          uint64_t first_gfn,
-                          uint64_t last_gfn)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_range_share;
-
-    mso.u.range.client_domain = client_domain;
-    mso.u.range.first_gfn = first_gfn;
-    mso.u.range.last_gfn = last_gfn;
-
-    return xc_memshr_memop(xch, source_domain, &mso);
-}
-
-int xc_memshr_domain_resume(xc_interface *xch,
-                            uint32_t domid)
-{
-    return xc_vm_event_control(xch, domid,
-                               XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
-                               NULL);
-}
-
-int xc_memshr_debug_gfn(xc_interface *xch,
-                        uint32_t domid,
-                        unsigned long gfn)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_debug_gfn;
-    mso.u.debug.u.gfn = gfn;
-
-    return xc_memshr_memop(xch, domid, &mso);
-}
-
-int xc_memshr_debug_gref(xc_interface *xch,
-                         uint32_t domid,
-                         grant_ref_t gref)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_debug_gref;
-    mso.u.debug.u.gref = gref;
-
-    return xc_memshr_memop(xch, domid, &mso);
-}
-
-int xc_memshr_fork(xc_interface *xch, uint32_t pdomid, uint32_t domid,
-                   bool allow_with_iommu, bool block_interrupts)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_fork;
-    mso.u.fork.parent_domain = pdomid;
-
-    if ( allow_with_iommu )
-        mso.u.fork.flags |= XENMEM_FORK_WITH_IOMMU_ALLOWED;
-    if ( block_interrupts )
-        mso.u.fork.flags |= XENMEM_FORK_BLOCK_INTERRUPTS;
-
-    return xc_memshr_memop(xch, domid, &mso);
-}
-
-int xc_memshr_fork_reset(xc_interface *xch, uint32_t domid)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-    mso.op = XENMEM_sharing_op_fork_reset;
-
-    return xc_memshr_memop(xch, domid, &mso);
-}
-
-int xc_memshr_audit(xc_interface *xch)
-{
-    xen_mem_sharing_op_t mso;
-
-    memset(&mso, 0, sizeof(mso));
-
-    mso.op = XENMEM_sharing_op_audit;
-
-    return do_memory_op(xch, XENMEM_sharing_op, &mso, sizeof(mso));
-}
-
-long xc_sharing_freed_pages(xc_interface *xch)
-{
-    return do_memory_op(xch, XENMEM_get_sharing_freed_pages, NULL, 0);
-}
-
-long xc_sharing_used_frames(xc_interface *xch)
-{
-    return do_memory_op(xch, XENMEM_get_sharing_shared_pages, NULL, 0);
-}
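
A minimal sketch of deduplicating one page between two hypothetical domains using the sharing interface above; both frames must be nominated first so that their handles can be passed to the share operation:

#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t dom_a = 1, dom_b = 2;            /* hypothetical domains */
    unsigned long gfn_a = 0x100, gfn_b = 0x200;
    uint64_t h_a, h_b;

    if ( !xch )
        return 1;

    /* Enable sharing for both domains, nominate one page in each,
     * then deduplicate them into a single shared frame. */
    if ( xc_memshr_control(xch, dom_a, 1) ||
         xc_memshr_control(xch, dom_b, 1) ||
         xc_memshr_nominate_gfn(xch, dom_a, gfn_a, &h_a) ||
         xc_memshr_nominate_gfn(xch, dom_b, gfn_b, &h_b) ||
         xc_memshr_share_gfns(xch, dom_a, gfn_a, h_a, dom_b, gfn_b, h_b) )
        perror("memory sharing");
    else
        printf("freed pages so far: %ld\n", xc_sharing_freed_pages(xch));

    xc_interface_close(xch);
    return 0;
}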
diff --git a/tools/libxc/xc_minios.c b/tools/libxc/xc_minios.c
deleted file mode 100644 (file)
index 1799daa..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/******************************************************************************
- *
- * Copyright 2007-2008 Samuel Thibault <samuel.thibault@eu.citrix.com>.
- * All rights reserved.
- * Use is subject to license terms.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#undef NDEBUG
-#include <mini-os/types.h>
-#include <mini-os/os.h>
-#include <mini-os/mm.h>
-#include <mini-os/lib.h>
-
-#include <xen/memory.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <assert.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <malloc.h>
-
-#include "xc_private.h"
-
-void minios_interface_close_fd(int fd);
-
-extern void minios_interface_close_fd(int fd);
-
-void minios_interface_close_fd(int fd)
-{
-    files[fd].type = FTYPE_NONE;
-}
-
-/* Optionally flush file to disk and discard page cache */
-void discard_file_cache(xc_interface *xch, int fd, int flush)
-{
-    if (flush)
-        fsync(fd);
-}
-
-void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
-{
-    return memalign(alignment, size);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
deleted file mode 100644 (file)
index 3820394..0000000
+++ /dev/null
@@ -1,999 +0,0 @@
-/******************************************************************************
- * xc_misc.c
- *
- * Miscellaneous control interface functions.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_bitops.h"
-#include "xc_private.h"
-#include <xen/hvm/hvm_op.h>
-
-int xc_get_max_cpus(xc_interface *xch)
-{
-    static int max_cpus = 0;
-    xc_physinfo_t physinfo;
-
-    if ( max_cpus )
-        return max_cpus;
-
-    if ( !xc_physinfo(xch, &physinfo) )
-    {
-        max_cpus = physinfo.max_cpu_id + 1;
-        return max_cpus;
-    }
-
-    return -1;
-}
-
-int xc_get_online_cpus(xc_interface *xch)
-{
-    xc_physinfo_t physinfo;
-
-    if ( !xc_physinfo(xch, &physinfo) )
-        return physinfo.nr_cpus;
-
-    return -1;
-}
-
-int xc_get_max_nodes(xc_interface *xch)
-{
-    static int max_nodes = 0;
-    xc_physinfo_t physinfo;
-
-    if ( max_nodes )
-        return max_nodes;
-
-    if ( !xc_physinfo(xch, &physinfo) )
-    {
-        max_nodes = physinfo.max_node_id + 1;
-        return max_nodes;
-    }
-
-    return -1;
-}
-
-int xc_get_cpumap_size(xc_interface *xch)
-{
-    int max_cpus = xc_get_max_cpus(xch);
-
-    if ( max_cpus < 0 )
-        return -1;
-    return (max_cpus + 7) / 8;
-}
-
-int xc_get_nodemap_size(xc_interface *xch)
-{
-    int max_nodes = xc_get_max_nodes(xch);
-
-    if ( max_nodes < 0 )
-        return -1;
-    return (max_nodes + 7) / 8;
-}
-
-xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
-{
-    int sz;
-
-    sz = xc_get_cpumap_size(xch);
-    if (sz <= 0)
-        return NULL;
-    return calloc(1, sz);
-}
-
-/*
- * xc_bitops.h has macros that do this as well - however they assume that
- * the bitmask is word aligned but xc_cpumap_t is only guaranteed to be
- * byte aligned and so we need byte versions for architectures which do
- * not support misaligned accesses (which is basically everyone
- * but x86, although even on x86 it can be inefficient).
- *
- * NOTE: The xc_bitops macros now use byte alignment.
- * TODO: Clean up the users of this interface.
- */
-#define BITS_PER_CPUMAP(map) (sizeof(*map) * 8)
-#define CPUMAP_ENTRY(cpu, map) ((map))[(cpu) / BITS_PER_CPUMAP(map)]
-#define CPUMAP_SHIFT(cpu, map) ((cpu) % BITS_PER_CPUMAP(map))
-void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map)
-{
-    CPUMAP_ENTRY(cpu, map) &= ~(1U << CPUMAP_SHIFT(cpu, map));
-}
-
-void xc_cpumap_setcpu(int cpu, xc_cpumap_t map)
-{
-    CPUMAP_ENTRY(cpu, map) |= (1U << CPUMAP_SHIFT(cpu, map));
-}
-
-int xc_cpumap_testcpu(int cpu, xc_cpumap_t map)
-{
-    return (CPUMAP_ENTRY(cpu, map) >> CPUMAP_SHIFT(cpu, map)) & 1;
-}
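
As the comment above notes, these byte-granular helpers pair with xc_cpumap_alloc(), which sizes the map from the hypervisor's reported CPU count. A small sketch of the intended usage:

#include <xenctrl.h>
#include <stdlib.h>

/* Allocate a CPU map, mark CPU 0 as set, verify it, then clear it again.
 * The map is plain calloc()ed memory and is released with free(). */
static int cpumap_example(xc_interface *xch)
{
    xc_cpumap_t map = xc_cpumap_alloc(xch);
    int ok;

    if ( !map )
        return -1;

    xc_cpumap_setcpu(0, map);
    ok = xc_cpumap_testcpu(0, map);
    xc_cpumap_clearcpu(0, map);

    free(map);
    return ok ? 0 : -1;
}
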
-
-xc_nodemap_t xc_nodemap_alloc(xc_interface *xch)
-{
-    int sz;
-
-    sz = xc_get_nodemap_size(xch);
-    if (sz <= 0)
-        return NULL;
-    return calloc(1, sz);
-}
-
-int xc_readconsolering(xc_interface *xch,
-                       char *buffer,
-                       unsigned int *pnr_chars,
-                       int clear, int incremental, uint32_t *pindex)
-{
-    int ret;
-    unsigned int nr_chars = *pnr_chars;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(buffer, nr_chars, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, buffer) )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_readconsole;
-    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
-    sysctl.u.readconsole.count = nr_chars;
-    sysctl.u.readconsole.clear = clear;
-    sysctl.u.readconsole.incremental = 0;
-    if ( pindex )
-    {
-        sysctl.u.readconsole.index = *pindex;
-        sysctl.u.readconsole.incremental = incremental;
-    }
-
-    if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
-    {
-        *pnr_chars = sysctl.u.readconsole.count;
-        if ( pindex )
-            *pindex = sysctl.u.readconsole.index;
-    }
-
-    xc_hypercall_bounce_post(xch, buffer);
-
-    return ret;
-}
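
The incremental mode is driven by carrying 'pindex' across calls; only characters newer than the passed index are returned. A hedged sketch of a polling reader (the buffer size is an arbitrary choice here):

#include <xenctrl.h>
#include <stdio.h>

/* Drain the hypervisor console ring without clearing it, printing any
 * new characters until the ring reports nothing further. */
static void dump_console(xc_interface *xch)
{
    char buf[16384];
    uint32_t index = 0;
    unsigned int nr;
    int rc;

    do {
        nr = sizeof(buf);
        rc = xc_readconsolering(xch, buf, &nr, 0 /* clear */,
                                1 /* incremental */, &index);
        if ( rc == 0 && nr )
            fwrite(buf, 1, nr, stdout);
    } while ( rc == 0 && nr );
}
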
-
-int xc_send_debug_keys(xc_interface *xch, const char *keys)
-{
-    int ret, len = strlen(keys);
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE_IN(keys, len);
-
-    if ( xc_hypercall_bounce_pre(xch, keys) )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_debug_keys;
-    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
-    sysctl.u.debug_keys.nr_keys = len;
-
-    ret = do_sysctl(xch, &sysctl);
-
-    xc_hypercall_bounce_post(xch, keys);
-
-    return ret;
-}
-
-int xc_physinfo(xc_interface *xch,
-                xc_physinfo_t *put_info)
-{
-    int ret;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_physinfo;
-
-    memcpy(&sysctl.u.physinfo, put_info, sizeof(*put_info));
-
-    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
-        return ret;
-
-    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));
-
-    return 0;
-}
-
-int xc_microcode_update(xc_interface *xch, const void *buf, size_t len)
-{
-    int ret;
-    DECLARE_PLATFORM_OP;
-    DECLARE_HYPERCALL_BUFFER(struct xenpf_microcode_update, uc);
-
-    uc = xc_hypercall_buffer_alloc(xch, uc, len);
-    if ( uc == NULL )
-        return -1;
-
-    memcpy(uc, buf, len);
-
-    platform_op.cmd = XENPF_microcode_update;
-    platform_op.u.microcode.length = len;
-    set_xen_guest_handle(platform_op.u.microcode.data, uc);
-
-    ret = do_platform_op(xch, &platform_op);
-
-    xc_hypercall_buffer_free(xch, uc);
-
-    return ret;
-}
-
-int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
-                   xc_cputopo_t *cputopo)
-{
-    int ret;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(cputopo, *max_cpus * sizeof(*cputopo),
-                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( (ret = xc_hypercall_bounce_pre(xch, cputopo)) )
-        goto out;
-
-    sysctl.u.cputopoinfo.num_cpus = *max_cpus;
-    set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, cputopo);
-
-    sysctl.cmd = XEN_SYSCTL_cputopoinfo;
-
-    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
-        goto out;
-
-    *max_cpus = sysctl.u.cputopoinfo.num_cpus;
-
-out:
-    xc_hypercall_bounce_post(xch, cputopo);
-
-    return ret;
-}
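
xc_cputopoinfo() follows the usual "caller supplies capacity, hypervisor reports the count actually filled" convention via *max_cpus. A sketch that sizes the array from xc_get_max_cpus(), one reasonable but not the only choice:

#include <xenctrl.h>
#include <stdlib.h>

static int cputopo_example(xc_interface *xch)
{
    int max = xc_get_max_cpus(xch);
    unsigned int num;
    xc_cputopo_t *topo;
    int rc;

    if ( max <= 0 )
        return -1;

    num = max;
    topo = calloc(num, sizeof(*topo));
    if ( !topo )
        return -1;

    rc = xc_cputopoinfo(xch, &num, topo);   /* 'num' now holds entries filled */
    free(topo);
    return rc;
}
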
-
-int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
-                xc_meminfo_t *meminfo, uint32_t *distance)
-{
-    int ret;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(meminfo, *max_nodes * sizeof(*meminfo),
-                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BOUNCE(distance,
-                             *max_nodes * *max_nodes * sizeof(*distance),
-                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( (ret = xc_hypercall_bounce_pre(xch, meminfo)) )
-        goto out;
-    if ((ret = xc_hypercall_bounce_pre(xch, distance)) )
-        goto out;
-
-    sysctl.u.numainfo.num_nodes = *max_nodes;
-    set_xen_guest_handle(sysctl.u.numainfo.meminfo, meminfo);
-    set_xen_guest_handle(sysctl.u.numainfo.distance, distance);
-
-    sysctl.cmd = XEN_SYSCTL_numainfo;
-
-    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
-        goto out;
-
-    *max_nodes = sysctl.u.numainfo.num_nodes;
-
-out:
-    xc_hypercall_bounce_post(xch, meminfo);
-    xc_hypercall_bounce_post(xch, distance);
-
-    return ret;
-}
-
-int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
-                   physdev_pci_device_t *devs,
-                   uint32_t *nodes)
-{
-    int ret = 0;
-    unsigned processed = 0;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(devs, num_devs * sizeof(*devs),
-                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    DECLARE_HYPERCALL_BOUNCE(nodes, num_devs* sizeof(*nodes),
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( (ret = xc_hypercall_bounce_pre(xch, devs)) )
-        goto out;
-    if ( (ret = xc_hypercall_bounce_pre(xch, nodes)) )
-        goto out;
-
-    sysctl.cmd = XEN_SYSCTL_pcitopoinfo;
-
-    while ( processed < num_devs )
-    {
-        sysctl.u.pcitopoinfo.num_devs = num_devs - processed;
-        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.devs, devs,
-                                    processed);
-        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.nodes, nodes,
-                                    processed);
-
-        if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
-                break;
-
-        processed += sysctl.u.pcitopoinfo.num_devs;
-    }
-
- out:
-    xc_hypercall_bounce_post(xch, devs);
-    xc_hypercall_bounce_post(xch, nodes);
-
-    return ret;
-}
-
-int xc_sched_id(xc_interface *xch,
-                int *sched_id)
-{
-    int ret;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_sched_id;
-
-    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
-        return ret;
-
-    *sched_id = sysctl.u.sched_id.sched_id;
-
-    return 0;
-}
-
-#if defined(__i386__) || defined(__x86_64__)
-int xc_mca_op(xc_interface *xch, struct xen_mc *mc)
-{
-    int ret = 0;
-    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( xc_hypercall_bounce_pre(xch, mc) )
-    {
-        PERROR("Could not bounce xen_mc memory buffer");
-        return -1;
-    }
-    mc->interface_version = XEN_MCA_INTERFACE_VERSION;
-
-    ret = xencall1(xch->xcall, __HYPERVISOR_mca,
-                   HYPERCALL_BUFFER_AS_ARG(mc));
-
-    xc_hypercall_bounce_post(xch, mc);
-    return ret;
-}
-
-int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
-                        xc_cpumap_t cpumap, unsigned int nr_bits)
-{
-    int ret = -1;
-    struct xen_mc mc_buf, *mc = &mc_buf;
-    struct xen_mc_inject_v2 *inject = &mc->u.mc_inject_v2;
-
-    DECLARE_HYPERCALL_BOUNCE(cpumap, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    memset(mc, 0, sizeof(*mc));
-
-    if ( cpumap )
-    {
-        if ( !nr_bits )
-        {
-            errno = EINVAL;
-            goto out;
-        }
-
-        HYPERCALL_BOUNCE_SET_SIZE(cpumap, (nr_bits + 7) / 8);
-        if ( xc_hypercall_bounce_pre(xch, cpumap) )
-        {
-            PERROR("Could not bounce cpumap memory buffer");
-            goto out;
-        }
-        set_xen_guest_handle(inject->cpumap.bitmap, cpumap);
-        inject->cpumap.nr_bits = nr_bits;
-    }
-
-    inject->flags = flags;
-    mc->cmd = XEN_MC_inject_v2;
-    mc->interface_version = XEN_MCA_INTERFACE_VERSION;
-
-    if ( xc_hypercall_bounce_pre(xch, mc) )
-    {
-        PERROR("Could not bounce xen_mc memory buffer");
-        goto out_free_cpumap;
-    }
-
-    ret = xencall1(xch->xcall, __HYPERVISOR_mca, HYPERCALL_BUFFER_AS_ARG(mc));
-
-    xc_hypercall_bounce_post(xch, mc);
-out_free_cpumap:
-    if ( cpumap )
-        xc_hypercall_bounce_post(xch, cpumap);
-out:
-    return ret;
-}
-#endif /* __i386__ || __x86_64__ */
-
-int xc_perfc_reset(xc_interface *xch)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_perfc_op;
-    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_reset;
-    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
-    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);
-
-    return do_sysctl(xch, &sysctl);
-}
-
-int xc_perfc_query_number(xc_interface *xch,
-                          int *nbr_desc,
-                          int *nbr_val)
-{
-    int rc;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_perfc_op;
-    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
-    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
-    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    if ( nbr_desc )
-        *nbr_desc = sysctl.u.perfc_op.nr_counters;
-    if ( nbr_val )
-        *nbr_val = sysctl.u.perfc_op.nr_vals;
-
-    return rc;
-}
-
-int xc_perfc_query(xc_interface *xch,
-                   struct xc_hypercall_buffer *desc,
-                   struct xc_hypercall_buffer *val)
-{
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BUFFER_ARGUMENT(desc);
-    DECLARE_HYPERCALL_BUFFER_ARGUMENT(val);
-
-    sysctl.cmd = XEN_SYSCTL_perfc_op;
-    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
-    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
-    set_xen_guest_handle(sysctl.u.perfc_op.val, val);
-
-    return do_sysctl(xch, &sysctl);
-}
-
-int xc_lockprof_reset(xc_interface *xch)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_lockprof_op;
-    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_reset;
-    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);
-
-    return do_sysctl(xch, &sysctl);
-}
-
-int xc_lockprof_query_number(xc_interface *xch,
-                             uint32_t *n_elems)
-{
-    int rc;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_lockprof_op;
-    sysctl.u.lockprof_op.max_elem = 0;
-    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
-    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    *n_elems = sysctl.u.lockprof_op.nr_elem;
-
-    return rc;
-}
-
-int xc_lockprof_query(xc_interface *xch,
-                      uint32_t *n_elems,
-                      uint64_t *time,
-                      struct xc_hypercall_buffer *data)
-{
-    int rc;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BUFFER_ARGUMENT(data);
-
-    sysctl.cmd = XEN_SYSCTL_lockprof_op;
-    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
-    sysctl.u.lockprof_op.max_elem = *n_elems;
-    set_xen_guest_handle(sysctl.u.lockprof_op.data, data);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    *n_elems = sysctl.u.lockprof_op.nr_elem;
-
-    return rc;
-}
-
-int xc_getcpuinfo(xc_interface *xch, int max_cpus,
-                  xc_cpuinfo_t *info, int *nr_cpus)
-{
-    int rc;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(info, max_cpus*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-
-    if ( xc_hypercall_bounce_pre(xch, info) )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
-    sysctl.u.getcpuinfo.max_cpus = max_cpus;
-    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    xc_hypercall_bounce_post(xch, info);
-
-    if ( nr_cpus )
-        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;
-
-    return rc;
-}
-
-int xc_livepatch_upload(xc_interface *xch,
-                        char *name,
-                        unsigned char *payload,
-                        uint32_t size)
-{
-    int rc;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BUFFER(char, local);
-    DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    struct xen_livepatch_name def_name = { };
-
-    if ( !name || !payload )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    def_name.size = strlen(name) + 1;
-    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);
-
-    if ( xc_hypercall_bounce_pre(xch, name) )
-        return -1;
-
-    local = xc_hypercall_buffer_alloc(xch, local, size);
-    if ( !local )
-    {
-        xc_hypercall_bounce_post(xch, name);
-        return -1;
-    }
-    memcpy(local, payload, size);
-
-    sysctl.cmd = XEN_SYSCTL_livepatch_op;
-    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_UPLOAD;
-    sysctl.u.livepatch.pad = 0;
-    sysctl.u.livepatch.u.upload.size = size;
-    set_xen_guest_handle(sysctl.u.livepatch.u.upload.payload, local);
-
-    sysctl.u.livepatch.u.upload.name = def_name;
-    set_xen_guest_handle(sysctl.u.livepatch.u.upload.name.name, name);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    xc_hypercall_buffer_free(xch, local);
-    xc_hypercall_bounce_post(xch, name);
-
-    return rc;
-}
-
-int xc_livepatch_get(xc_interface *xch,
-                     char *name,
-                     struct xen_livepatch_status *status)
-{
-    int rc;
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(name, 0 /*adjust later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    struct xen_livepatch_name def_name = { };
-
-    if ( !name )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    def_name.size = strlen(name) + 1;
-    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);
-
-    if ( xc_hypercall_bounce_pre(xch, name) )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_livepatch_op;
-    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_GET;
-    sysctl.u.livepatch.pad = 0;
-
-    sysctl.u.livepatch.u.get.status.state = 0;
-    sysctl.u.livepatch.u.get.status.rc = 0;
-
-    sysctl.u.livepatch.u.get.name = def_name;
-    set_xen_guest_handle(sysctl.u.livepatch.u.get.name.name, name);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    xc_hypercall_bounce_post(xch, name);
-
-    memcpy(status, &sysctl.u.livepatch.u.get.status, sizeof(*status));
-
-    return rc;
-}
-
-/*
- * Get the number of available payloads and the actual total sizes of
- * the payloads' name and metadata arrays.
- *
- * This function is typically executed before xc_livepatch_list() in
- * order to obtain the sizes and correctly allocate all necessary data
- * resources.
- *
- * The return value is zero if the hypercall completed successfully.
- *
- * If there was an error performing the sysctl operation, the return value
- * will contain the hypercall error code value.
- */
-int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
-                                uint32_t *name_total_size,
-                                uint32_t *metadata_total_size)
-{
-    DECLARE_SYSCTL;
-    int rc;
-
-    if ( !nr || !name_total_size || !metadata_total_size )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    memset(&sysctl, 0, sizeof(sysctl));
-    sysctl.cmd = XEN_SYSCTL_livepatch_op;
-    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;
-
-    rc = do_sysctl(xch, &sysctl);
-    if ( rc )
-        return rc;
-
-    *nr = sysctl.u.livepatch.u.list.nr;
-    *name_total_size = sysctl.u.livepatch.u.list.name_total_size;
-    *metadata_total_size = sysctl.u.livepatch.u.list.metadata_total_size;
-
-    return 0;
-}
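
A minimal sketch of the sizing step described above (the printf formatting is illustrative only):

#include <xenctrl.h>
#include <stdio.h>

static int livepatch_sizes_example(xc_interface *xch)
{
    unsigned int nr;
    uint32_t name_total, meta_total;

    if ( xc_livepatch_list_get_sizes(xch, &nr, &name_total, &meta_total) )
        return -1;

    printf("%u payload(s); names need %u bytes, metadata %u bytes\n",
           nr, name_total, meta_total);
    return 0;
}
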
-
-/*
- * The heart of this function is to get an array of the following objects:
- *   - xen_livepatch_status_t: states and return codes of payloads
- *   - name: names of payloads
- *   - len: lengths of corresponding payloads' names
- *   - metadata: payloads' metadata
- *   - metadata_len: lengths of corresponding payloads' metadata
- *
- * However it is complex because it has to deal with the hypervisor
- * returning some of the requested data or data being stale
- * (another hypercall might alter the list).
- *
- * The parameters that the function expects to contain data from
- * the hypervisor are: 'info', 'name', and 'len'. 'done' and 'left' are
- * also updated with the number of entries filled out and, respectively,
- * the number of entries left to fetch from the hypervisor.
- *
- * It is expected that the caller of this function will first issue the
- * xc_livepatch_list_get_sizes() in order to obtain total sizes of names
- * and all metadata as well as the current number of payload entries.
- * The total sizes are required and supplied via the 'name_total_size' and
- * 'metadata_total_size' parameters.
- *
- * The 'max' is to be provided by the caller with the maximum number of
- * entries that 'info', 'name', 'len', 'metadata' and 'metadata_len' arrays
- * can be filled up with.
- *
- * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
- * structure size.
- *
- * Each entry in the 'name' array may have an arbitrary size.
- *
- * Each entry in the 'len' array is expected to be of uint32_t size.
- *
- * Each entry in the 'metadata' array may have an arbitrary size.
- *
- * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
- *
- * The return value is zero if the hypercall completed successfully.
- * Note that the return value is _not_ the amount of entries filled
- * out - that is saved in 'done'.
- *
- * If there was an error performing the operation, the return value
- * will contain a negative -EXX type value. 'done' and 'left' will
- * contain the number of entries that were successfully retrieved
- * (if any).
- */
-int xc_livepatch_list(xc_interface *xch, const unsigned int max,
-                      const unsigned int start,
-                      struct xen_livepatch_status *info,
-                      char *name, uint32_t *len,
-                      const uint32_t name_total_size,
-                      char *metadata, uint32_t *metadata_len,
-                      const uint32_t metadata_total_size,
-                      unsigned int *done, unsigned int *left)
-{
-    int rc;
-    DECLARE_SYSCTL;
-    /* The sizes are adjusted later - hence zero. */
-    DECLARE_HYPERCALL_BOUNCE(info, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BOUNCE(len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BOUNCE(metadata, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BOUNCE(metadata_len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    uint32_t max_batch_sz, nr;
-    uint32_t version = 0, retries = 0;
-    uint32_t adjust = 0;
-    uint32_t name_off = 0, metadata_off = 0;
-    uint32_t name_sz, metadata_sz;
-
-    if ( !max || !info || !name || !len ||
-         !metadata || !metadata_len || !done || !left )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    if ( name_total_size == 0 )
-    {
-        errno = ENOENT;
-        return -1;
-    }
-
-    memset(&sysctl, 0, sizeof(sysctl));
-    sysctl.cmd = XEN_SYSCTL_livepatch_op;
-    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;
-    sysctl.u.livepatch.u.list.idx = start;
-
-    max_batch_sz = max;
-    name_sz = name_total_size;
-    metadata_sz = metadata_total_size;
-    *done = 0;
-    *left = 0;
-    do {
-        uint32_t _name_sz, _metadata_sz;
-
-        /*
-         * The first time we go in this loop our 'max' may be bigger
-         * than what the hypervisor is comfortable with - hence the first
-         * couple of loops may adjust the number of entries we will
-         * want filled (tracked by 'nr').
-         *
-         * N.B. This is a do { } while loop and the right hand side of
-         * the conditional when adjusting will evaluate to false (as
-         * *left is set to zero before the loop). Hence we need this
-         * 'adjust' - even if we reset it at the start of the loop.
-         */
-        if ( adjust )
-            adjust = 0; /* Used when adjusting the 'max_batch_sz' or 'retries'. */
-
-        nr = min(max - *done, max_batch_sz);
-
-        sysctl.u.livepatch.u.list.nr = nr;
-        /* Fix the size (may vary between hypercalls). */
-        HYPERCALL_BOUNCE_SET_SIZE(info, nr * sizeof(*info));
-        HYPERCALL_BOUNCE_SET_SIZE(name, name_sz);
-        HYPERCALL_BOUNCE_SET_SIZE(len, nr * sizeof(*len));
-        HYPERCALL_BOUNCE_SET_SIZE(metadata, metadata_sz);
-        HYPERCALL_BOUNCE_SET_SIZE(metadata_len, nr * sizeof(*metadata_len));
-        /* Move the pointer to proper offset into 'info'. */
-        (HYPERCALL_BUFFER(info))->ubuf = info + *done;
-        (HYPERCALL_BUFFER(name))->ubuf = name + name_off;
-        (HYPERCALL_BUFFER(len))->ubuf = len + *done;
-        (HYPERCALL_BUFFER(metadata))->ubuf = metadata + metadata_off;
-        (HYPERCALL_BUFFER(metadata_len))->ubuf = metadata_len + *done;
-        /* Allocate memory. */
-        rc = xc_hypercall_bounce_pre(xch, info);
-        if ( rc )
-            break;
-
-        rc = xc_hypercall_bounce_pre(xch, name);
-        if ( rc )
-            break;
-
-        rc = xc_hypercall_bounce_pre(xch, len);
-        if ( rc )
-            break;
-
-        rc = xc_hypercall_bounce_pre(xch, metadata);
-        if ( rc )
-            break;
-
-        rc = xc_hypercall_bounce_pre(xch, metadata_len);
-        if ( rc )
-            break;
-
-        set_xen_guest_handle(sysctl.u.livepatch.u.list.status, info);
-        set_xen_guest_handle(sysctl.u.livepatch.u.list.name, name);
-        set_xen_guest_handle(sysctl.u.livepatch.u.list.len, len);
-        set_xen_guest_handle(sysctl.u.livepatch.u.list.metadata, metadata);
-        set_xen_guest_handle(sysctl.u.livepatch.u.list.metadata_len, metadata_len);
-
-        rc = do_sysctl(xch, &sysctl);
-        /*
-         * From here on we MUST call xc_hypercall_bounce. If rc < 0 we
-         * end up doing it (outside the loop), so using a break is OK.
-         */
-        if ( rc < 0 && errno == E2BIG )
-        {
-            if ( max_batch_sz <= 1 )
-                break;
-            max_batch_sz >>= 1;
-            adjust = 1; /* For the loop conditional to let us loop again. */
-            /* No memory leaks! */
-            xc_hypercall_bounce_post(xch, info);
-            xc_hypercall_bounce_post(xch, name);
-            xc_hypercall_bounce_post(xch, len);
-            xc_hypercall_bounce_post(xch, metadata);
-            xc_hypercall_bounce_post(xch, metadata_len);
-            continue;
-        }
-
-        if ( rc < 0 ) /* For all other errors we bail out. */
-            break;
-
-        if ( !version )
-            version = sysctl.u.livepatch.u.list.version;
-
-        if ( sysctl.u.livepatch.u.list.version != version )
-        {
-            /* We could make this configurable as a parameter? */
-            if ( retries++ > 3 )
-            {
-                rc = -1;
-                errno = EBUSY;
-                break;
-            }
-            *done = 0; /* Retry from scratch. */
-            version = sysctl.u.livepatch.u.list.version;
-            adjust = 1; /* And make sure we continue in the loop. */
-            /* No memory leaks. */
-            xc_hypercall_bounce_post(xch, info);
-            xc_hypercall_bounce_post(xch, name);
-            xc_hypercall_bounce_post(xch, len);
-            xc_hypercall_bounce_post(xch, metadata);
-            xc_hypercall_bounce_post(xch, metadata_len);
-            continue;
-        }
-
-        /* We should never hit this, but just in case. */
-        if ( rc > nr )
-        {
-            errno = EOVERFLOW; /* Overflow! */
-            rc = -1;
-            break;
-        }
-        *left = sysctl.u.livepatch.u.list.nr; /* Total remaining count. */
-        _name_sz = sysctl.u.livepatch.u.list.name_total_size; /* Total received name size. */
-        _metadata_sz = sysctl.u.livepatch.u.list.metadata_total_size; /* Total received metadata size. */
-        /* Copy only up to 'rc' entries of data - we could use 'min(rc, nr)' if desired. */
-        HYPERCALL_BOUNCE_SET_SIZE(info, (rc * sizeof(*info)));
-        HYPERCALL_BOUNCE_SET_SIZE(name, _name_sz);
-        HYPERCALL_BOUNCE_SET_SIZE(len, (rc * sizeof(*len)));
-        HYPERCALL_BOUNCE_SET_SIZE(metadata, _metadata_sz);
-        HYPERCALL_BOUNCE_SET_SIZE(metadata_len, (rc * sizeof(*metadata_len)));
-        /* Bounce the data and free the bounce buffer. */
-        xc_hypercall_bounce_post(xch, info);
-        xc_hypercall_bounce_post(xch, name);
-        xc_hypercall_bounce_post(xch, len);
-        xc_hypercall_bounce_post(xch, metadata);
-        xc_hypercall_bounce_post(xch, metadata_len);
-
-        name_sz -= _name_sz;
-        name_off += _name_sz;
-        metadata_sz -= _metadata_sz;
-        metadata_off += _metadata_sz;
-
-        /* And update how many elements of info we have copied into. */
-        *done += rc;
-        /* Update idx. */
-        sysctl.u.livepatch.u.list.idx = *done;
-    } while ( adjust || (*done < max && *left != 0) );
-
-    if ( rc < 0 )
-    {
-        xc_hypercall_bounce_post(xch, len);
-        xc_hypercall_bounce_post(xch, name);
-        xc_hypercall_bounce_post(xch, info);
-        xc_hypercall_bounce_post(xch, metadata);
-        xc_hypercall_bounce_post(xch, metadata_len);
-    }
-
-    return rc > 0 ? 0 : rc;
-}
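
Putting the two calls together, a hedged end-to-end sketch that allocates the arrays exactly as the comment above prescribes; error handling is abbreviated and the single call relies on the retry/batching logic the implementation handles internally:

#include <xenctrl.h>
#include <stdlib.h>

static int list_livepatches(xc_interface *xch)
{
    unsigned int nr, done = 0, left = 0;
    uint32_t name_total, meta_total;
    struct xen_livepatch_status *info = NULL;
    char *names = NULL, *meta = NULL;
    uint32_t *name_lens = NULL, *meta_lens = NULL;
    int rc = -1;

    if ( xc_livepatch_list_get_sizes(xch, &nr, &name_total, &meta_total) )
        return -1;
    if ( !nr )
        return 0;

    info = calloc(nr, sizeof(*info));
    names = calloc(1, name_total);
    name_lens = calloc(nr, sizeof(*name_lens));
    meta = calloc(1, meta_total);
    meta_lens = calloc(nr, sizeof(*meta_lens));

    if ( info && names && name_lens && meta && meta_lens )
        rc = xc_livepatch_list(xch, nr, 0, info, names, name_lens,
                               name_total, meta, meta_lens, meta_total,
                               &done, &left);

    free(info); free(names); free(name_lens); free(meta); free(meta_lens);
    return rc;
}
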
-
-static int _xc_livepatch_action(xc_interface *xch,
-                                char *name,
-                                unsigned int action,
-                                uint32_t timeout,
-                                uint32_t flags)
-{
-    int rc;
-    DECLARE_SYSCTL;
-    /* The size is figured out when we strlen(name) */
-    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    struct xen_livepatch_name def_name = { };
-
-    def_name.size = strlen(name) + 1;
-
-    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);
-
-    if ( xc_hypercall_bounce_pre(xch, name) )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_livepatch_op;
-    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_ACTION;
-    sysctl.u.livepatch.pad = 0;
-    sysctl.u.livepatch.u.action.cmd = action;
-    sysctl.u.livepatch.u.action.timeout = timeout;
-    sysctl.u.livepatch.u.action.flags = flags;
-    sysctl.u.livepatch.u.action.pad = 0;
-
-    sysctl.u.livepatch.u.action.name = def_name;
-    set_xen_guest_handle(sysctl.u.livepatch.u.action.name.name, name);
-
-    rc = do_sysctl(xch, &sysctl);
-
-    xc_hypercall_bounce_post(xch, name);
-
-    return rc;
-}
-
-int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
-{
-    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_APPLY, timeout, flags);
-}
-
-int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
-{
-    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REVERT, timeout, flags);
-}
-
-int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
-{
-    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_UNLOAD, timeout, flags);
-}
-
-int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
-{
-    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REPLACE, timeout, flags);
-}
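
Upload and apply are typically paired. A sketch assuming the payload blob has already been read from disk; passing a timeout of 0 is assumed here to request the hypervisor's default, and no flags are set:

#include <xenctrl.h>

static int upload_and_apply(xc_interface *xch, char *name,
                            unsigned char *blob, uint32_t blob_size)
{
    if ( xc_livepatch_upload(xch, name, blob, blob_size) )
        return -1;

    return xc_livepatch_apply(xch, name, 0 /* default timeout (assumed) */, 0);
}
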
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
deleted file mode 100644 (file)
index 4ac823e..0000000
+++ /dev/null
@@ -1,257 +0,0 @@
-/******************************************************************************
- *
- * xc_monitor.c
- *
- * Interface to VM event monitor
- *
- * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port)
-{
-    return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
-                              port);
-}
-
-int xc_monitor_disable(xc_interface *xch, uint32_t domain_id)
-{
-    return xc_vm_event_control(xch, domain_id,
-                               XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
-                               NULL);
-}
-
-int xc_monitor_resume(xc_interface *xch, uint32_t domain_id)
-{
-    return xc_vm_event_control(xch, domain_id,
-                               XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
-                               NULL);
-}
-
-int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
-                                uint32_t *capabilities)
-{
-    int rc;
-    DECLARE_DOMCTL;
-
-    if ( !capabilities )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES;
-
-    rc = do_domctl(xch, &domctl);
-    if ( rc )
-        return rc;
-
-    *capabilities = domctl.u.monitor_op.event;
-    return 0;
-}
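
A sketch of the common enable sequence: map the monitor ring, confirm the desired event is advertised in the capability mask (treated here as a bitmask of XEN_DOMCTL_MONITOR_EVENT_* bits, which is an assumption of this example), then switch the event on:

#include <xenctrl.h>
#include <stdbool.h>
#include <stddef.h>

static void *enable_singlestep_monitor(xc_interface *xch, uint32_t domid)
{
    uint32_t port, caps;
    void *ring = xc_monitor_enable(xch, domid, &port);

    if ( !ring )
        return NULL;

    if ( xc_monitor_get_capabilities(xch, domid, &caps) ||
         !(caps & (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP)) ||
         xc_monitor_singlestep(xch, domid, true) )
    {
        xc_monitor_disable(xch, domid);
        return NULL;
    }

    return ring; /* Events are consumed from this ring, signalled via 'port'. */
}
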
-
-int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
-                             uint16_t index, bool enable, bool sync,
-                             uint64_t bitmask, bool onchangeonly)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG;
-    domctl.u.monitor_op.u.mov_to_cr.index = index;
-    domctl.u.monitor_op.u.mov_to_cr.sync = sync;
-    domctl.u.monitor_op.u.mov_to_cr.onchangeonly = onchangeonly;
-    domctl.u.monitor_op.u.mov_to_cr.bitmask = bitmask;
-    domctl.u.monitor_op.u.mov_to_cr.pad1 = 0;
-    domctl.u.monitor_op.u.mov_to_cr.pad2 = 0;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
-                          bool enable, bool onchangeonly)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR;
-    domctl.u.monitor_op.u.mov_to_msr.msr = msr;
-    domctl.u.monitor_op.u.mov_to_msr.onchangeonly = onchangeonly;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
-                                   bool enable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id,
-                          bool enable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
-                                 bool enable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id, bool enable,
-                             bool sync, bool allow_userspace)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST;
-    domctl.u.monitor_op.u.guest_request.sync = sync;
-    domctl.u.monitor_op.u.guest_request.allow_userspace = enable ? allow_userspace : false;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_inguest_pagefault(xc_interface *xch, uint32_t domain_id,
-                                bool disable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = disable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
-                                bool enable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP;
-    domctl.u.monitor_op.event = enable;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
-                                bool enable, bool sync)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION;
-    domctl.u.monitor_op.u.debug_exception.sync = sync;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_CPUID;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
-                               bool enable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
-                                  bool enable)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_monitor_op;
-    domctl.domain = domain_id;
-    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
-                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
-    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED;
-
-    return do_domctl(xch, &domctl);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_msr_x86.h b/tools/libxc/xc_msr_x86.h
deleted file mode 100644 (file)
index 7f100e7..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * xc_msr_x86.h
- *
- * MSR definition macros
- *
- * Copyright (C) 2014      Intel Corporation
- * Author Dongxiao Xu <dongxiao.xu@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- */
-
-#ifndef XC_MSR_X86_H
-#define XC_MSR_X86_H
-
-#define MSR_IA32_TSC            0x00000010
-#define MSR_IA32_CMT_EVTSEL     0x00000c8d
-#define MSR_IA32_CMT_CTR        0x00000c8e
-
-#endif
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_netbsd.c b/tools/libxc/xc_netbsd.c
deleted file mode 100644 (file)
index 3197993..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/******************************************************************************
- *
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-#include <unistd.h>
-#include <fcntl.h>
-#include <malloc.h>
-
-/* Optionally flush file to disk and discard page cache */
-void discard_file_cache(xc_interface *xch, int fd, int flush) 
-{
-    off_t cur = 0;
-    int saved_errno = errno;
-
-    if ( flush && (fsync(fd) < 0) )
-    {
-        /*PERROR("Failed to flush file: %s", strerror(errno));*/
-        goto out;
-    }
-
-    /*
-     * Calculate the last page boundary of the amount written so far,
-     * unless we are flushing, in which case the entire cache
-     * is discarded.
-     */
-    if ( !flush )
-    {
-        if ( ( cur = lseek(fd, 0, SEEK_CUR)) == (off_t)-1 )
-            cur = 0;
-        cur &= ~(PAGE_SIZE - 1);
-    }
-
-    /* Discard from the buffer cache. */
-    if ( posix_fadvise(fd, 0, cur, POSIX_FADV_DONTNEED) < 0 )
-    {
-        /*PERROR("Failed to discard cache: %s", strerror(errno));*/
-        goto out;
-    }
-
- out:
-    errno = saved_errno;
-}
-
-void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
-{
-    return valloc(size);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_pagetab.c b/tools/libxc/xc_pagetab.c
deleted file mode 100644 (file)
index db25c20..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-/******************************************************************************
- * xc_pagetab.c
- *
- * Function to translate virtual to physical addresses.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-#include <xen/hvm/save.h>
-
-#define CR0_PG  0x80000000
-#define CR4_PAE 0x20
-#define PTE_PSE 0x80
-#define EFER_LMA 0x400
-
-
-unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
-                                           int vcpu, unsigned long long virt)
-{
-    xc_dominfo_t dominfo;
-    uint64_t paddr, mask, pte = 0;
-    int size, level, pt_levels = 2;
-    void *map;
-
-    if (xc_domain_getinfo(xch, dom, 1, &dominfo) != 1 
-        || dominfo.domid != dom)
-        return 0;
-
-    /* What kind of paging are we dealing with? */
-    if (dominfo.hvm) {
-        struct hvm_hw_cpu ctx;
-        if (xc_domain_hvm_getcontext_partial(xch, dom,
-                                             HVM_SAVE_CODE(CPU), vcpu,
-                                             &ctx, sizeof ctx) != 0)
-            return 0;
-        if (!(ctx.cr0 & CR0_PG))
-            return virt >> PAGE_SHIFT;
-        pt_levels = (ctx.msr_efer&EFER_LMA) ? 4 : (ctx.cr4&CR4_PAE) ? 3 : 2;
-        paddr = ctx.cr3 & ((pt_levels == 3) ? ~0x1full : ~0xfffull);
-    } else {
-        unsigned int gwidth;
-        vcpu_guest_context_any_t ctx;
-        if (xc_vcpu_getcontext(xch, dom, vcpu, &ctx) != 0)
-            return 0;
-        if (xc_domain_get_guest_width(xch, dom, &gwidth) != 0)
-            return 0;
-        if (gwidth == 8) {
-            pt_levels = 4;
-            paddr = (uint64_t)xen_cr3_to_pfn_x86_64(ctx.x64.ctrlreg[3])
-                << PAGE_SHIFT;
-        } else {
-            pt_levels = 3;
-            paddr = (uint64_t)xen_cr3_to_pfn_x86_32(ctx.x32.ctrlreg[3])
-                << PAGE_SHIFT;
-        }
-    }
-
-    if (pt_levels == 4) {
-        virt &= 0x0000ffffffffffffull;
-        mask =  0x0000ff8000000000ull;
-    } else if (pt_levels == 3) {
-        virt &= 0x00000000ffffffffull;
-        mask =  0x0000007fc0000000ull;
-    } else {
-        virt &= 0x00000000ffffffffull;
-        mask =  0x00000000ffc00000ull;
-    }
-    size = (pt_levels == 2 ? 4 : 8);
-
-    /* Walk the pagetables */
-    for (level = pt_levels; level > 0; level--) {
-        paddr += ((virt & mask) >> (xc_ffs64(mask) - 1)) * size;
-        map = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_READ, 
-                                   paddr >>PAGE_SHIFT);
-        if (!map) 
-            return 0;
-        memcpy(&pte, map + (paddr & (PAGE_SIZE - 1)), size);
-        munmap(map, PAGE_SIZE);
-        if (!(pte & 1)) {
-            errno = EADDRNOTAVAIL;
-            return 0;
-        }
-        paddr = pte & 0x000ffffffffff000ull;
-        if ((level == 2 || (level == 3 && pt_levels == 4)) && (pte & PTE_PSE)) {
-            mask = ((mask ^ ~-mask) >> 1); /* All bits below first set bit */
-            return ((paddr & ~mask) | (virt & mask)) >> PAGE_SHIFT;
-        }
-        mask >>= (pt_levels == 2 ? 10 : 9);
-    }
-    return paddr >> PAGE_SHIFT;
-}
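
A usage sketch: translate a guest-virtual address on vCPU 0 to a guest frame number, remembering that a return of 0 signals failure (with errno set to EADDRNOTAVAIL for a non-present mapping):

#include <xenctrl.h>
#include <stdio.h>

static void show_translation(xc_interface *xch, uint32_t domid,
                             unsigned long long vaddr)
{
    unsigned long gfn = xc_translate_foreign_address(xch, domid, 0, vaddr);

    if ( gfn )
        printf("va 0x%llx -> gfn 0x%lx\n", vaddr, gfn);
    else
        perror("translation failed");
}
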
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_physdev.c b/tools/libxc/xc_physdev.c
deleted file mode 100644 (file)
index 460a8e7..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-/******************************************************************************
- * xc_physdev.c
- *
- * API for manipulating physical-device access permissions.
- *
- * Copyright (c) 2004, Rolf Neugebauer (Intel Research Cambridge)
- * Copyright (c) 2004, K A Fraser (University of Cambridge)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-int xc_physdev_pci_access_modify(xc_interface *xch,
-                                 uint32_t domid,
-                                 int bus,
-                                 int dev,
-                                 int func,
-                                 int enable)
-{
-    errno = ENOSYS;
-    return -1;
-}
-
-int xc_physdev_map_pirq(xc_interface *xch,
-                        uint32_t domid,
-                        int index,
-                        int *pirq)
-{
-    int rc;
-    struct physdev_map_pirq map;
-
-    if ( !pirq )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    memset(&map, 0, sizeof(struct physdev_map_pirq));
-    map.domid = domid;
-    map.type = MAP_PIRQ_TYPE_GSI;
-    map.index = index;
-    map.pirq = *pirq < 0 ? index : *pirq;
-
-    rc = do_physdev_op(xch, PHYSDEVOP_map_pirq, &map, sizeof(map));
-
-    if ( !rc )
-        *pirq = map.pirq;
-
-    return rc;
-}
-
-int xc_physdev_map_pirq_msi(xc_interface *xch,
-                            uint32_t domid,
-                            int index,
-                            int *pirq,
-                            int devfn,
-                            int bus,
-                            int entry_nr,
-                            uint64_t table_base)
-{
-    int rc;
-    struct physdev_map_pirq map;
-
-    if ( !pirq )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    memset(&map, 0, sizeof(struct physdev_map_pirq));
-    map.domid = domid;
-    map.type = MAP_PIRQ_TYPE_MSI;
-    map.index = index;
-    map.pirq = *pirq;
-    map.bus = bus;
-    map.devfn = devfn;
-    map.entry_nr = entry_nr;
-    map.table_base = table_base;
-
-    rc = do_physdev_op(xch, PHYSDEVOP_map_pirq, &map, sizeof(map));
-
-    if ( !rc )
-        *pirq = map.pirq;
-
-    return rc;
-}
-
-int xc_physdev_unmap_pirq(xc_interface *xch,
-                          uint32_t domid,
-                          int pirq)
-{
-    int rc;
-    struct physdev_unmap_pirq unmap;
-
-    memset(&unmap, 0, sizeof(struct physdev_unmap_pirq));
-    unmap.domid = domid;
-    unmap.pirq = pirq;
-
-    rc = do_physdev_op(xch, PHYSDEVOP_unmap_pirq, &unmap, sizeof(unmap));
-
-    return rc;
-}
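
For GSI interrupts the caller can request the identity mapping by passing a negative pirq, as the MAP_PIRQ_TYPE_GSI path above shows. A round-trip sketch:

#include <xenctrl.h>

static int pirq_roundtrip(xc_interface *xch, uint32_t domid, int gsi)
{
    int pirq = -1;   /* negative: let the GSI number be used as the pirq */
    int rc = xc_physdev_map_pirq(xch, domid, gsi, &pirq);

    if ( rc )
        return rc;

    return xc_physdev_unmap_pirq(xch, domid, pirq);
}
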
-
diff --git a/tools/libxc/xc_pm.c b/tools/libxc/xc_pm.c
deleted file mode 100644 (file)
index 76d7eb7..0000000
+++ /dev/null
@@ -1,455 +0,0 @@
-/******************************************************************************
- * xc_pm.c - Libxc API for Xen Power Management (Px/Cx/Tx, etc.) statistics
- *
- * Copyright (c) 2008, Liu Jinsong <jinsong.liu@intel.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-#include <stdbool.h>
-#include "xc_private.h"
-
-#include <xen-tools/libs.h>
-
-/*
- * Get PM statistic info
- */
-int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px)
-{
-    DECLARE_SYSCTL;
-    int ret;
-
-    sysctl.cmd = XEN_SYSCTL_get_pmstat;
-    sysctl.u.get_pmstat.type = PMSTAT_get_max_px;
-    sysctl.u.get_pmstat.cpuid = cpuid;
-    ret = xc_sysctl(xch, &sysctl);
-    if ( ret )
-        return ret;
-
-    *max_px = sysctl.u.get_pmstat.u.getpx.total;
-    return ret;
-}
-
-int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt)
-{
-    DECLARE_SYSCTL;
-    /* Sizes unknown until xc_pm_get_max_px */
-    DECLARE_NAMED_HYPERCALL_BOUNCE(trans, pxpt->trans_pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    DECLARE_NAMED_HYPERCALL_BOUNCE(pt, pxpt->pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    int max_px, ret;
-
-    if ( !pxpt->trans_pt || !pxpt->pt )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0)
-        return ret;
-
-    HYPERCALL_BOUNCE_SET_SIZE(trans, max_px * max_px * sizeof(uint64_t));
-    HYPERCALL_BOUNCE_SET_SIZE(pt, max_px * sizeof(struct xc_px_val));
-
-    if ( xc_hypercall_bounce_pre(xch, trans) )
-        return ret;
-
-    if ( xc_hypercall_bounce_pre(xch, pt) )
-    {
-        xc_hypercall_bounce_post(xch, trans);
-        return ret;
-    }
-
-    sysctl.cmd = XEN_SYSCTL_get_pmstat;
-    sysctl.u.get_pmstat.type = PMSTAT_get_pxstat;
-    sysctl.u.get_pmstat.cpuid = cpuid;
-    sysctl.u.get_pmstat.u.getpx.total = max_px;
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.trans_pt, trans);
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.pt, pt);
-
-    ret = xc_sysctl(xch, &sysctl);
-    if ( ret )
-    {
-       xc_hypercall_bounce_post(xch, trans);
-       xc_hypercall_bounce_post(xch, pt);
-        return ret;
-    }
-
-    pxpt->total = sysctl.u.get_pmstat.u.getpx.total;
-    pxpt->usable = sysctl.u.get_pmstat.u.getpx.usable;
-    pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
-    pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;
-
-    xc_hypercall_bounce_post(xch, trans);
-    xc_hypercall_bounce_post(xch, pt);
-
-    return ret;
-}
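
The bounce-buffer sizing above dictates how a caller must allocate the statistics arrays: trans_pt holds max_px * max_px uint64_t entries and pt holds max_px xc_px_val entries. A sketch:

#include <xenctrl.h>
#include <stdlib.h>

static int pxstat_example(xc_interface *xch, int cpuid)
{
    struct xc_px_stat st = { 0 };
    int max_px, rc = -1;

    if ( xc_pm_get_max_px(xch, cpuid, &max_px) )
        return -1;

    st.trans_pt = calloc((size_t)max_px * max_px, sizeof(*st.trans_pt));
    st.pt = calloc(max_px, sizeof(*st.pt));

    if ( st.trans_pt && st.pt )
        rc = xc_pm_get_pxstat(xch, cpuid, &st);

    free(st.trans_pt);
    free(st.pt);
    return rc;
}
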
-
-int xc_pm_reset_pxstat(xc_interface *xch, int cpuid)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_get_pmstat;
-    sysctl.u.get_pmstat.type = PMSTAT_reset_pxstat;
-    sysctl.u.get_pmstat.cpuid = cpuid;
-
-    return xc_sysctl(xch, &sysctl);
-}
-
-int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx)
-{
-    DECLARE_SYSCTL;
-    int ret = 0;
-
-    sysctl.cmd = XEN_SYSCTL_get_pmstat;
-    sysctl.u.get_pmstat.type = PMSTAT_get_max_cx;
-    sysctl.u.get_pmstat.cpuid = cpuid;
-    if ( (ret = xc_sysctl(xch, &sysctl)) != 0 )
-        return ret;
-
-    *max_cx = sysctl.u.get_pmstat.u.getcx.nr;
-    return ret;
-}
-
-int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt)
-{
-    DECLARE_SYSCTL;
-    DECLARE_NAMED_HYPERCALL_BOUNCE(triggers, cxpt->triggers,
-                                   cxpt->nr * sizeof(*cxpt->triggers),
-                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_NAMED_HYPERCALL_BOUNCE(residencies, cxpt->residencies,
-                                   cxpt->nr * sizeof(*cxpt->residencies),
-                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_NAMED_HYPERCALL_BOUNCE(pc, cxpt->pc,
-                                   cxpt->nr_pc * sizeof(*cxpt->pc),
-                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_NAMED_HYPERCALL_BOUNCE(cc, cxpt->cc,
-                                   cxpt->nr_cc * sizeof(*cxpt->cc),
-                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    int ret = -1;
-
-    if ( xc_hypercall_bounce_pre(xch, triggers) )
-        goto unlock_0;
-    if ( xc_hypercall_bounce_pre(xch, residencies) )
-        goto unlock_1;
-    if ( xc_hypercall_bounce_pre(xch, pc) )
-        goto unlock_2;
-    if ( xc_hypercall_bounce_pre(xch, cc) )
-        goto unlock_3;
-
-    sysctl.cmd = XEN_SYSCTL_get_pmstat;
-    sysctl.u.get_pmstat.type = PMSTAT_get_cxstat;
-    sysctl.u.get_pmstat.cpuid = cpuid;
-    sysctl.u.get_pmstat.u.getcx.nr = cxpt->nr;
-    sysctl.u.get_pmstat.u.getcx.nr_pc = cxpt->nr_pc;
-    sysctl.u.get_pmstat.u.getcx.nr_cc = cxpt->nr_cc;
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.triggers, triggers);
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.residencies, residencies);
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.pc, pc);
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.cc, cc);
-
-    if ( (ret = xc_sysctl(xch, &sysctl)) )
-        goto unlock_4;
-
-    cxpt->nr = sysctl.u.get_pmstat.u.getcx.nr;
-    cxpt->last = sysctl.u.get_pmstat.u.getcx.last;
-    cxpt->idle_time = sysctl.u.get_pmstat.u.getcx.idle_time;
-    cxpt->nr_pc = sysctl.u.get_pmstat.u.getcx.nr_pc;
-    cxpt->nr_cc = sysctl.u.get_pmstat.u.getcx.nr_cc;
-
-unlock_4:
-    xc_hypercall_bounce_post(xch, cc);
-unlock_3:
-    xc_hypercall_bounce_post(xch, pc);
-unlock_2:
-    xc_hypercall_bounce_post(xch, residencies);
-unlock_1:
-    xc_hypercall_bounce_post(xch, triggers);
-unlock_0:
-    return ret;
-}
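A typical caller first queries how many C-states the CPU exposes and sizes the arrays accordingly. A minimal sketch of that two-step pattern (hypothetical caller code, not part of this file; the package/core residency arrays are left empty for brevity, which assumes zero-sized bounces are tolerated):

static int example_print_cxstat(xc_interface *xch, int cpuid)
{
    struct xc_cx_stat cxstat = { 0 };
    int i, max_cx, rc = -1;

    if ( xc_pm_get_max_cx(xch, cpuid, &max_cx) )
        return -1;

    cxstat.nr = max_cx;
    cxstat.triggers = calloc(max_cx, sizeof(*cxstat.triggers));
    cxstat.residencies = calloc(max_cx, sizeof(*cxstat.residencies));
    if ( cxstat.triggers && cxstat.residencies &&
         (rc = xc_pm_get_cxstat(xch, cpuid, &cxstat)) == 0 )
        for ( i = 0; i < cxstat.nr; i++ )
            printf("C%d: triggers=%llu residency=%llu\n", i,
                   (unsigned long long)cxstat.triggers[i],
                   (unsigned long long)cxstat.residencies[i]);

    free(cxstat.triggers);
    free(cxstat.residencies);
    return rc;
}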
-
-int xc_pm_reset_cxstat(xc_interface *xch, int cpuid)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_get_pmstat;
-    sysctl.u.get_pmstat.type = PMSTAT_reset_cxstat;
-    sysctl.u.get_pmstat.cpuid = cpuid;
-
-    return xc_sysctl(xch, &sysctl);
-}
-
-/*
- * 1. Get cpufreq (PM) parameters from the hypervisor.
- * 2. Provide user control over those PM parameters.
- */
-int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
-                        struct xc_get_cpufreq_para *user_para)
-{
-    DECLARE_SYSCTL;
-    int ret = 0;
-    struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
-    DECLARE_NAMED_HYPERCALL_BOUNCE(affected_cpus,
-                        user_para->affected_cpus,
-                        user_para->cpu_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_frequencies,
-                        user_para->scaling_available_frequencies,
-                        user_para->freq_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
-                        user_para->scaling_available_governors,
-                        user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    bool has_num = user_para->cpu_num &&
-                   user_para->freq_num &&
-                   user_para->gov_num;
-
-    if ( has_num )
-    {
-        if ( (!user_para->affected_cpus)                    ||
-             (!user_para->scaling_available_frequencies)    ||
-             (!user_para->scaling_available_governors) )
-        {
-            errno = EINVAL;
-            return -1;
-        }
-        if ( xc_hypercall_bounce_pre(xch, affected_cpus) )
-            goto unlock_1;
-        if ( xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
-            goto unlock_2;
-        if ( xc_hypercall_bounce_pre(xch, scaling_available_governors) )
-            goto unlock_3;
-
-        set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
-        set_xen_guest_handle(sys_para->scaling_available_frequencies, scaling_available_frequencies);
-        set_xen_guest_handle(sys_para->scaling_available_governors, scaling_available_governors);
-    }
-
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = GET_CPUFREQ_PARA;
-    sysctl.u.pm_op.cpuid = cpuid;
-    sys_para->cpu_num  = user_para->cpu_num;
-    sys_para->freq_num = user_para->freq_num;
-    sys_para->gov_num  = user_para->gov_num;
-
-    ret = xc_sysctl(xch, &sysctl);
-    if ( ret )
-    {
-        if ( errno == EAGAIN )
-        {
-            user_para->cpu_num  = sys_para->cpu_num;
-            user_para->freq_num = sys_para->freq_num;
-            user_para->gov_num  = sys_para->gov_num;
-            ret = -errno;
-        }
-
-        if ( has_num )
-            goto unlock_4;
-        goto unlock_1;
-    }
-    else
-    {
-        user_para->cpuinfo_cur_freq = sys_para->cpuinfo_cur_freq;
-        user_para->cpuinfo_max_freq = sys_para->cpuinfo_max_freq;
-        user_para->cpuinfo_min_freq = sys_para->cpuinfo_min_freq;
-        user_para->scaling_cur_freq = sys_para->scaling_cur_freq;
-        user_para->scaling_max_freq = sys_para->scaling_max_freq;
-        user_para->scaling_min_freq = sys_para->scaling_min_freq;
-        user_para->turbo_enabled    = sys_para->turbo_enabled;
-
-        memcpy(user_para->scaling_driver,
-                sys_para->scaling_driver, CPUFREQ_NAME_LEN);
-        memcpy(user_para->scaling_governor,
-                sys_para->scaling_governor, CPUFREQ_NAME_LEN);
-
-        /* Copy to user_para regardless of the cpufreq governor in use. */
-        BUILD_BUG_ON(sizeof(((struct xc_get_cpufreq_para *)0)->u) !=
-                    sizeof(((struct xen_get_cpufreq_para *)0)->u));
-
-        memcpy(&user_para->u, &sys_para->u, sizeof(sys_para->u));
-    }
-
-unlock_4:
-    xc_hypercall_bounce_post(xch, scaling_available_governors);
-unlock_3:
-    xc_hypercall_bounce_post(xch, scaling_available_frequencies);
-unlock_2:
-    xc_hypercall_bounce_post(xch, affected_cpus);
-unlock_1:
-    return ret;
-}
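Callers normally invoke xc_get_cpufreq_para() twice: a first call with zero counts fails with EAGAIN and reports the required sizes, after which the arrays are allocated and the call repeated. A sketch of that pattern, based on the EAGAIN handling above (hypothetical caller code; error-path cleanup omitted):

static int example_query_cpufreq_para(xc_interface *xch, int cpuid,
                                      struct xc_get_cpufreq_para *para)
{
    int rc;

    memset(para, 0, sizeof(*para));

    rc = xc_get_cpufreq_para(xch, cpuid, para);
    if ( rc == 0 || rc != -EAGAIN )
        return rc;

    /* para->cpu_num/freq_num/gov_num now hold the required sizes. */
    para->affected_cpus = malloc(para->cpu_num * sizeof(uint32_t));
    para->scaling_available_frequencies =
        malloc(para->freq_num * sizeof(uint32_t));
    para->scaling_available_governors =
        malloc(para->gov_num * CPUFREQ_NAME_LEN);
    if ( !para->affected_cpus || !para->scaling_available_frequencies ||
         !para->scaling_available_governors )
        return -1;

    return xc_get_cpufreq_para(xch, cpuid, para);
}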
-
-int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname)
-{
-    DECLARE_SYSCTL;
-    char *scaling_governor = sysctl.u.pm_op.u.set_gov.scaling_governor;
-
-    if ( !xch || !govname )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = SET_CPUFREQ_GOV;
-    sysctl.u.pm_op.cpuid = cpuid;
-    strncpy(scaling_governor, govname, CPUFREQ_NAME_LEN - 1);
-    scaling_governor[CPUFREQ_NAME_LEN - 1] = '\0';
-
-    return xc_sysctl(xch, &sysctl);
-}
-
-int xc_set_cpufreq_para(xc_interface *xch, int cpuid, 
-                        int ctrl_type, int ctrl_value)
-{
-    DECLARE_SYSCTL;
-
-    if ( !xch )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = SET_CPUFREQ_PARA;
-    sysctl.u.pm_op.cpuid = cpuid;
-    sysctl.u.pm_op.u.set_para.ctrl_type = ctrl_type;
-    sysctl.u.pm_op.u.set_para.ctrl_value = ctrl_value;
-
-    return xc_sysctl(xch, &sysctl);
-}
-
-int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq)
-{
-    int ret = 0;
-    DECLARE_SYSCTL;
-
-    if ( !xch || !avg_freq )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = GET_CPUFREQ_AVGFREQ;
-    sysctl.u.pm_op.cpuid = cpuid;
-    ret = xc_sysctl(xch, &sysctl);
-
-    *avg_freq = sysctl.u.pm_op.u.get_avgfreq;
-
-    return ret;
-}
-
-/*
- * value: 0 - disable sched_smt_power_savings
- *        1 - enable sched_smt_power_savings
- */
-int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value)
-{
-    int rc;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_sched_opt_smt;
-    sysctl.u.pm_op.cpuid = 0;
-    sysctl.u.pm_op.u.set_sched_opt_smt = value;
-    rc = do_sysctl(xch, &sysctl);
-
-    return rc;
-}
-
-static int get_max_cstate(xc_interface *xch, uint32_t *value, uint32_t type)
-{
-    int rc;
-    DECLARE_SYSCTL;
-
-    if ( !xch || !value )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_get_max_cstate;
-    sysctl.u.pm_op.cpuid = type;
-    sysctl.u.pm_op.u.get_max_cstate = 0;
-    rc = do_sysctl(xch, &sysctl);
-    *value = sysctl.u.pm_op.u.get_max_cstate;
-
-    return rc;
-}
-
-int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value)
-{
-    return get_max_cstate(xch, value, 0);
-}
-
-int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value)
-{
-    return get_max_cstate(xch, value, 1);
-}
-
-static int set_max_cstate(xc_interface *xch, uint32_t value, uint32_t type)
-{
-    DECLARE_SYSCTL;
-
-    if ( !xch )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_max_cstate;
-    sysctl.u.pm_op.cpuid = type;
-    sysctl.u.pm_op.u.set_max_cstate = value;
-
-    return do_sysctl(xch, &sysctl);
-}
-
-int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value)
-{
-    return set_max_cstate(xch, value, 0);
-}
-
-int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value)
-{
-    return set_max_cstate(xch, value, 1);
-}
-
-int xc_enable_turbo(xc_interface *xch, int cpuid)
-{
-    DECLARE_SYSCTL;
-
-    if ( !xch )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_enable_turbo;
-    sysctl.u.pm_op.cpuid = cpuid;
-    return do_sysctl(xch, &sysctl);
-}
-
-int xc_disable_turbo(xc_interface *xch, int cpuid)
-{
-    DECLARE_SYSCTL;
-
-    if ( !xch )
-    {
-        errno = EINVAL;
-        return -1;
-    }
-    sysctl.cmd = XEN_SYSCTL_pm_op;
-    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_disable_turbo;
-    sysctl.u.pm_op.cpuid = cpuid;
-    return do_sysctl(xch, &sysctl);
-}
diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c
deleted file mode 100644 (file)
index 8af96b1..0000000
+++ /dev/null
@@ -1,781 +0,0 @@
-/******************************************************************************
- * xc_private.c
- *
- * Helper functions for the rest of the library.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-#include "xenctrl_dom.h"
-#include <stdarg.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <pthread.h>
-#include <assert.h>
-
-struct xc_interface_core *xc_interface_open(xentoollog_logger *logger,
-                                            xentoollog_logger *dombuild_logger,
-                                            unsigned open_flags)
-{
-    struct xc_interface_core xch_buf = { 0 }, *xch = &xch_buf;
-
-    xch->flags = open_flags;
-    xch->dombuild_logger_file = 0;
-    xc_clear_last_error(xch);
-
-    xch->error_handler   = logger;           xch->error_handler_tofree   = 0;
-    xch->dombuild_logger = dombuild_logger;  xch->dombuild_logger_tofree = 0;
-
-    if (!xch->error_handler) {
-        xch->error_handler = xch->error_handler_tofree =
-            (xentoollog_logger*)
-            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
-        if (!xch->error_handler)
-            goto err;
-    }
-
-    xch = malloc(sizeof(*xch));
-    if (!xch) {
-        xch = &xch_buf;
-        PERROR("Could not allocate new xc_interface struct");
-        goto err;
-    }
-    *xch = xch_buf;
-
-    if (open_flags & XC_OPENFLAG_DUMMY)
-        return xch; /* We are done */
-
-    xch->xcall = xencall_open(xch->error_handler,
-        open_flags & XC_OPENFLAG_NON_REENTRANT ? XENCALL_OPENFLAG_NON_REENTRANT : 0U);
-    if ( xch->xcall == NULL )
-        goto err;
-
-    xch->fmem = xenforeignmemory_open(xch->error_handler, 0);
-    if ( xch->fmem == NULL )
-        goto err;
-
-    xch->dmod = xendevicemodel_open(xch->error_handler, 0);
-    if ( xch->dmod == NULL )
-        goto err;
-
-    return xch;
-
- err:
-    xenforeignmemory_close(xch->fmem);
-    xencall_close(xch->xcall);
-    xtl_logger_destroy(xch->error_handler_tofree);
-    if (xch != &xch_buf) free(xch);
-    return NULL;
-}
-
-int xc_interface_close(xc_interface *xch)
-{
-    int rc = 0;
-
-    if (!xch)
-        return 0;
-
-    rc = xencall_close(xch->xcall);
-    if (rc) PERROR("Could not close xencall interface");
-
-    rc = xenforeignmemory_close(xch->fmem);
-    if (rc) PERROR("Could not close foreign memory interface");
-
-    rc = xendevicemodel_close(xch->dmod);
-    if (rc) PERROR("Could not close device model interface");
-
-    xtl_logger_destroy(xch->dombuild_logger_tofree);
-    xtl_logger_destroy(xch->error_handler_tofree);
-
-    free(xch);
-    return rc;
-}
-
-xencall_handle *xc_interface_xcall_handle(xc_interface *xch)
-{
-    return xch->xcall;
-}
-
-struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch)
-{
-    return xch->fmem;
-}
-
-struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch)
-{
-    return xch->dmod;
-}
-
-static pthread_key_t errbuf_pkey;
-static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;
-
-const xc_error *xc_get_last_error(xc_interface *xch)
-{
-    return &xch->last_error;
-}
-
-void xc_clear_last_error(xc_interface *xch)
-{
-    xch->last_error.code = XC_ERROR_NONE;
-    xch->last_error.message[0] = '\0';
-}
-
-const char *xc_error_code_to_desc(int code)
-{
-    /* Sync to members of xc_error_code enumeration in xenctrl.h */
-    switch ( code )
-    {
-    case XC_ERROR_NONE:
-        return "No error details";
-    case XC_INTERNAL_ERROR:
-        return "Internal error";
-    case XC_INVALID_KERNEL:
-        return "Invalid kernel";
-    case XC_INVALID_PARAM:
-        return "Invalid configuration";
-    case XC_OUT_OF_MEMORY:
-        return "Out of memory";
-    }
-
-    return "Unknown error code";
-}
-
-void xc_reportv(xc_interface *xch, xentoollog_logger *lg,
-                xentoollog_level level, int code,
-                const char *fmt, va_list args) {
-    int saved_errno = errno;
-    char msgbuf[XC_MAX_ERROR_MSG_LEN];
-    char *msg;
-
-    /* Strip newlines from messages.
-     * XXX really the messages themselves should have the newlines removed.
-     */
-    char fmt_nonewline[512];
-    int fmt_l;
-
-    fmt_l = strlen(fmt);
-    if (fmt_l && fmt[fmt_l-1]=='\n' && fmt_l < sizeof(fmt_nonewline)) {
-        memcpy(fmt_nonewline, fmt, fmt_l-1);
-        fmt_nonewline[fmt_l-1] = 0;
-        fmt = fmt_nonewline;
-    }
-
-    if ( level >= XTL_ERROR ) {
-        msg = xch->last_error.message;
-        xch->last_error.code = code;
-    } else {
-        msg = msgbuf;
-    }
-    vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
-    msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';
-
-    xtl_log(lg, level, -1, "xc",
-            "%s" "%s%s", msg,
-            code?": ":"", code ? xc_error_code_to_desc(code) : "");
-
-    errno = saved_errno;
-}
-
-void xc_report(xc_interface *xch, xentoollog_logger *lg,
-               xentoollog_level level, int code, const char *fmt, ...) {
-    va_list args;
-    va_start(args,fmt);
-    xc_reportv(xch,lg,level,code,fmt,args);
-    va_end(args);
-}
-
-void xc_report_error(xc_interface *xch, int code, const char *fmt, ...)
-{
-    va_list args;
-    va_start(args, fmt);
-    xc_reportv(xch, xch->error_handler, XTL_ERROR, code, fmt, args);
-    va_end(args);
-}
-
-const char *xc_set_progress_prefix(xc_interface *xch, const char *doing)
-{
-    const char *old = xch->currently_progress_reporting;
-
-    xch->currently_progress_reporting = doing;
-    return old;
-}
-
-void xc_report_progress_single(xc_interface *xch, const char *doing)
-{
-    assert(doing);
-    xtl_progress(xch->error_handler, "xc", doing, 0, 0);
-}
-
-void xc_report_progress_step(xc_interface *xch,
-                             unsigned long done, unsigned long total)
-{
-    assert(xch->currently_progress_reporting);
-    xtl_progress(xch->error_handler, "xc",
-                 xch->currently_progress_reporting, done, total);
-}
-
-int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
-                          unsigned int num, xen_pfn_t *arr)
-{
-    int rc;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(arr, sizeof(*arr) * num, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    if ( xc_hypercall_bounce_pre(xch, arr) )
-        return -1;
-    domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
-    domctl.domain = dom;
-    domctl.u.getpageframeinfo3.num = num;
-    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
-    rc = do_domctl_retry_efault(xch, &domctl);
-    xc_hypercall_bounce_post(xch, arr);
-    return rc;
-}
-
-int xc_mmuext_op(
-    xc_interface *xch,
-    struct mmuext_op *op,
-    unsigned int nr_ops,
-    uint32_t dom)
-{
-    DECLARE_HYPERCALL_BOUNCE(op, nr_ops*sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    long ret = -1;
-
-    if ( xc_hypercall_bounce_pre(xch, op) )
-    {
-        PERROR("Could not bounce memory for mmuext op hypercall");
-        goto out1;
-    }
-
-    ret = xencall4(xch->xcall, __HYPERVISOR_mmuext_op,
-                   HYPERCALL_BUFFER_AS_ARG(op),
-                   nr_ops, 0, dom);
-
-    xc_hypercall_bounce_post(xch, op);
-
- out1:
-    return ret;
-}
-
-static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
-{
-    int rc, err = 0;
-    DECLARE_NAMED_HYPERCALL_BOUNCE(updates, mmu->updates, mmu->idx*sizeof(*mmu->updates), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( mmu->idx == 0 )
-        return 0;
-
-    if ( xc_hypercall_bounce_pre(xch, updates) )
-    {
-        PERROR("flush_mmu_updates: bounce buffer failed");
-        err = 1;
-        goto out;
-    }
-
-    rc = xencall4(xch->xcall, __HYPERVISOR_mmu_update,
-                  HYPERCALL_BUFFER_AS_ARG(updates),
-                  mmu->idx, 0, mmu->subject);
-    if ( rc < 0 )
-    {
-        ERROR("Failure when submitting mmu updates");
-        err = 1;
-    }
-
-    mmu->idx = 0;
-
-    xc_hypercall_bounce_post(xch, updates);
-
- out:
-    return err;
-}
-
-struct xc_mmu *xc_alloc_mmu_updates(xc_interface *xch, unsigned int subject)
-{
-    struct xc_mmu *mmu = malloc(sizeof(*mmu));
-    if ( mmu == NULL )
-        return mmu;
-    mmu->idx     = 0;
-    mmu->subject = subject;
-    return mmu;
-}
-
-int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
-                      unsigned long long ptr, unsigned long long val)
-{
-    mmu->updates[mmu->idx].ptr = ptr;
-    mmu->updates[mmu->idx].val = val;
-
-    if ( ++mmu->idx == MAX_MMU_UPDATES )
-        return flush_mmu_updates(xch, mmu);
-
-    return 0;
-}
-
-int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
-{
-    return flush_mmu_updates(xch, mmu);
-}
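The intended batching pattern: allocate a struct xc_mmu, queue updates with xc_add_mmu_update() (which flushes automatically once MAX_MMU_UPDATES entries accumulate), then flush the remainder explicitly. A minimal sketch (hypothetical caller code):

static int example_batch_mmu_updates(xc_interface *xch, uint32_t domid,
                                     const uint64_t *ptrs,
                                     const uint64_t *vals, unsigned int n)
{
    struct xc_mmu *mmu = xc_alloc_mmu_updates(xch, domid);
    unsigned int i;
    int rc = -1;

    if ( !mmu )
        return -1;

    for ( i = 0; i < n; i++ )
        if ( xc_add_mmu_update(xch, mmu, ptrs[i], vals[i]) )
            goto out;

    rc = xc_flush_mmu_updates(xch, mmu);

 out:
    free(mmu); /* the caller owns the structure */
    return rc;
}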
-
-long do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
-{
-    DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-    long ret = -1;
-
-    if ( xc_hypercall_bounce_pre(xch, arg) )
-    {
-        PERROR("Could not bounce memory for XENMEM hypercall");
-        goto out1;
-    }
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_memory_op,
-                   cmd, HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_bounce_post(xch, arg);
- out1:
-    return ret;
-}
-
-int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn)
-{
-    long rc = do_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0);
-
-    if ( rc >= 0 )
-    {
-        *max_mfn = rc;
-        rc = 0;
-    }
-    return rc;
-}
-
-long long xc_domain_get_cpu_usage(xc_interface *xch, uint32_t domid, int vcpu)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
-    domctl.domain = domid;
-    domctl.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
-    if ( (do_domctl(xch, &domctl) < 0) )
-    {
-        PERROR("Could not get info on domain");
-        return -1;
-    }
-    return domctl.u.getvcpuinfo.cpu_time;
-}
-
-int xc_machphys_mfn_list(xc_interface *xch,
-                        unsigned long max_extents,
-                        xen_pfn_t *extent_start)
-{
-    int rc;
-    DECLARE_HYPERCALL_BOUNCE(extent_start, max_extents * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    struct xen_machphys_mfn_list xmml = {
-        .max_extents = max_extents,
-    };
-
-    if ( xc_hypercall_bounce_pre(xch, extent_start) )
-    {
-        PERROR("Could not bounce memory for XENMEM_machphys_mfn_list hypercall");
-        return -1;
-    }
-
-    set_xen_guest_handle(xmml.extent_start, extent_start);
-    rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
-    if (rc || xmml.nr_extents != max_extents)
-        rc = -1;
-    else
-        rc = 0;
-
-    xc_hypercall_bounce_post(xch, extent_start);
-
-    return rc;
-}
-
-long xc_get_tot_pages(xc_interface *xch, uint32_t domid)
-{
-    xc_dominfo_t info;
-    if ( (xc_domain_getinfo(xch, domid, 1, &info) != 1) ||
-         (info.domid != domid) )
-        return -1;
-    return info.nr_pages;
-}
-
-int xc_copy_to_domain_page(xc_interface *xch,
-                           uint32_t domid,
-                           unsigned long dst_pfn,
-                           const char *src_page)
-{
-    void *vaddr = xc_map_foreign_range(
-        xch, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
-    if ( vaddr == NULL )
-        return -1;
-    memcpy(vaddr, src_page, PAGE_SIZE);
-    munmap(vaddr, PAGE_SIZE);
-    xc_domain_cacheflush(xch, domid, dst_pfn, 1);
-    return 0;
-}
-
-int xc_clear_domain_pages(xc_interface *xch,
-                          uint32_t domid,
-                          unsigned long dst_pfn,
-                          int num)
-{
-    size_t size = num * PAGE_SIZE;
-    void *vaddr = xc_map_foreign_range(
-        xch, domid, size, PROT_WRITE, dst_pfn);
-    if ( vaddr == NULL )
-        return -1;
-    memset(vaddr, 0, size);
-    munmap(vaddr, size);
-    xc_domain_cacheflush(xch, domid, dst_pfn, num);
-    return 0;
-}
-
-int xc_domctl(xc_interface *xch, struct xen_domctl *domctl)
-{
-    return do_domctl(xch, domctl);
-}
-
-int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl)
-{
-    return do_sysctl(xch, sysctl);
-}
-
-int xc_version(xc_interface *xch, int cmd, void *arg)
-{
-    DECLARE_HYPERCALL_BOUNCE(arg, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT); /* Size unknown until cmd decoded */
-    size_t sz;
-    int rc;
-
-    switch ( cmd )
-    {
-    case XENVER_version:
-        sz = 0;
-        break;
-    case XENVER_extraversion:
-        sz = sizeof(xen_extraversion_t);
-        break;
-    case XENVER_compile_info:
-        sz = sizeof(xen_compile_info_t);
-        break;
-    case XENVER_capabilities:
-        sz = sizeof(xen_capabilities_info_t);
-        break;
-    case XENVER_changeset:
-        sz = sizeof(xen_changeset_info_t);
-        break;
-    case XENVER_platform_parameters:
-        sz = sizeof(xen_platform_parameters_t);
-        break;
-    case XENVER_get_features:
-        sz = sizeof(xen_feature_info_t);
-        break;
-    case XENVER_pagesize:
-        sz = 0;
-        break;
-    case XENVER_guest_handle:
-        sz = sizeof(xen_domain_handle_t);
-        break;
-    case XENVER_commandline:
-        sz = sizeof(xen_commandline_t);
-        break;
-    case XENVER_build_id:
-        {
-            xen_build_id_t *build_id = (xen_build_id_t *)arg;
-            sz = sizeof(*build_id) + build_id->len;
-            HYPERCALL_BOUNCE_SET_DIR(arg, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-            break;
-        }
-    default:
-        ERROR("xc_version: unknown command %d\n", cmd);
-        return -EINVAL;
-    }
-
-    HYPERCALL_BOUNCE_SET_SIZE(arg, sz);
-
-    if ( (sz != 0) && xc_hypercall_bounce_pre(xch, arg) )
-    {
-        PERROR("Could not bounce buffer for version hypercall");
-        return -ENOMEM;
-    }
-
-    rc = do_xen_version(xch, cmd, HYPERCALL_BUFFER(arg));
-
-    if ( sz != 0 )
-        xc_hypercall_bounce_post(xch, arg);
-
-    return rc;
-}
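For the commands whose size decodes to zero the buffer may be NULL; XENVER_version, for instance, packs the version into the return value (conventionally major in the upper 16 bits, minor in the lower). A short sketch (illustrative only):

int ver = xc_version(xch, XENVER_version, NULL);

if ( ver >= 0 )
    printf("Running on Xen %d.%d\n", ver >> 16, ver & 0xffff);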
-
-unsigned long xc_make_page_below_4G(
-    xc_interface *xch, uint32_t domid, unsigned long mfn)
-{
-    xen_pfn_t old_mfn = mfn;
-    xen_pfn_t new_mfn;
-
-    if ( xc_domain_decrease_reservation_exact(
-        xch, domid, 1, 0, &old_mfn) != 0 )
-    {
-        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
-        return 0;
-    }
-
-    if ( xc_domain_increase_reservation_exact(
-        xch, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
-    {
-        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
-        return 0;
-    }
-
-    return new_mfn;
-}
-
-static void
-_xc_clean_errbuf(void * m)
-{
-    free(m);
-    pthread_setspecific(errbuf_pkey, NULL);
-}
-
-static void
-_xc_init_errbuf(void)
-{
-    pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
-}
-
-const char *xc_strerror(xc_interface *xch, int errcode)
-{
-    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
-    {
-        return strerror(errcode);
-    }
-    else
-    {
-#define XS_BUFSIZE 32
-        char *errbuf;
-        static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-        char *strerror_str;
-
-        pthread_once(&errbuf_pkey_once, _xc_init_errbuf);
-
-        errbuf = pthread_getspecific(errbuf_pkey);
-        if (errbuf == NULL) {
-            errbuf = malloc(XS_BUFSIZE);
-            if ( errbuf == NULL )
-                return "(failed to allocate errbuf)";
-            pthread_setspecific(errbuf_pkey, errbuf);
-        }
-
-        /*
-         * Thread-unsafe strerror() is protected by a local mutex. We copy the
-         * string to a thread-private buffer before releasing the mutex.
-         */
-        pthread_mutex_lock(&mutex);
-        strerror_str = strerror(errcode);
-        strncpy(errbuf, strerror_str, XS_BUFSIZE);
-        errbuf[XS_BUFSIZE-1] = '\0';
-        pthread_mutex_unlock(&mutex);
-
-        return errbuf;
-    }
-}
-
-void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
-{
-    uint64_t l;
-    int i, j, b;
-
-    for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
-        l = lp[i];
-        for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
-            bp[b+j] = l;
-            l >>= 8;
-            nbits -= 8;
-        }
-    }
-}
-
-void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
-{
-    uint64_t l;
-    int i, j, b;
-
-    for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
-        l = 0;
-        for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
-            l |= (uint64_t)bp[b+j] << (j*8);
-            nbits -= 8;
-        }
-        lp[i] = l;
-    }
-}
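Both helpers use a little-endian byte order within each 64-bit word, so they are exact inverses of each other; for example (illustrative values only):

uint64_t lp[1] = { 0x0102 };
uint8_t bp[8] = { 0 };

bitmap_64_to_byte(bp, lp, 16);   /* bp[0] == 0x02, bp[1] == 0x01 */
bitmap_byte_to_64(lp, bp, 16);   /* lp[0] == 0x0102 again */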
-
-int read_exact(int fd, void *data, size_t size)
-{
-    size_t offset = 0;
-    ssize_t len;
-
-    while ( offset < size )
-    {
-        len = read(fd, (char *)data + offset, size - offset);
-        if ( (len == -1) && (errno == EINTR) )
-            continue;
-        if ( len == 0 )
-            errno = 0;
-        if ( len <= 0 )
-            return -1;
-        offset += len;
-    }
-
-    return 0;
-}
-
-int write_exact(int fd, const void *data, size_t size)
-{
-    size_t offset = 0;
-    ssize_t len;
-
-    while ( offset < size )
-    {
-        len = write(fd, (const char *)data + offset, size - offset);
-        if ( (len == -1) && (errno == EINTR) )
-            continue;
-        if ( len <= 0 )
-            return -1;
-        offset += len;
-    }
-
-    return 0;
-}
-
-#if defined(__MINIOS__)
-/*
- * MiniOS's libc doesn't know about writev(). Implement it as multiple write()s.
- */
-int writev_exact(int fd, const struct iovec *iov, int iovcnt)
-{
-    int rc, i;
-
-    for ( i = 0; i < iovcnt; ++i )
-    {
-        rc = write_exact(fd, iov[i].iov_base, iov[i].iov_len);
-        if ( rc )
-            return rc;
-    }
-
-    return 0;
-}
-#else
-int writev_exact(int fd, const struct iovec *iov, int iovcnt)
-{
-    struct iovec *local_iov = NULL;
-    int rc = 0, iov_idx = 0, saved_errno = 0;
-    ssize_t len;
-
-    while ( iov_idx < iovcnt )
-    {
-        /*
-         * Skip over iov[] entries with 0 length.
-         *
-         * This is needed to cover the case where we took a partial write and
-         * all remaining vectors are of 0 length.  In such a case, the results
-         * from writev() are indistinguishable from EOF.
-         */
-        while ( iov[iov_idx].iov_len == 0 )
-            if ( ++iov_idx == iovcnt )
-                goto out;
-
-        len = writev(fd, &iov[iov_idx], min(iovcnt - iov_idx, IOV_MAX));
-        saved_errno = errno;
-
-        if ( (len == -1) && (errno == EINTR) )
-            continue;
-        if ( len <= 0 )
-        {
-            rc = -1;
-            goto out;
-        }
-
-        /* Check iov[] to see whether we had a partial or complete write. */
-        while ( (len > 0) && (iov_idx < iovcnt) )
-        {
-            if ( len >= iov[iov_idx].iov_len )
-                len -= iov[iov_idx++].iov_len;
-            else
-            {
-                /* Partial write of iov[iov_idx]. Copy iov so we can adjust
-                 * element iov_idx and resubmit the rest. */
-                if ( !local_iov )
-                {
-                    local_iov = malloc(iovcnt * sizeof(*iov));
-                    if ( !local_iov )
-                    {
-                        saved_errno = ENOMEM;
-                        goto out;
-                    }
-
-                    iov = memcpy(local_iov, iov, iovcnt * sizeof(*iov));
-                }
-
-                local_iov[iov_idx].iov_base += len;
-                local_iov[iov_idx].iov_len  -= len;
-                break;
-            }
-        }
-    }
-
-    saved_errno = 0;
-
- out:
-    free(local_iov);
-    errno = saved_errno;
-    return rc;
-}
-#endif
-
-int xc_ffs8(uint8_t x)
-{
-    int i;
-    for ( i = 0; i < 8; i++ )
-        if ( x & (1u << i) )
-            return i+1;
-    return 0;
-}
-
-int xc_ffs16(uint16_t x)
-{
-    uint8_t h = x>>8, l = x;
-    return l ? xc_ffs8(l) : h ? xc_ffs8(h) + 8 : 0;
-}
-
-int xc_ffs32(uint32_t x)
-{
-    uint16_t h = x>>16, l = x;
-    return l ? xc_ffs16(l) : h ? xc_ffs16(h) + 16 : 0;
-}
-
-int xc_ffs64(uint64_t x)
-{
-    uint32_t h = x>>32, l = x;
-    return l ? xc_ffs32(l) : h ? xc_ffs32(h) + 32 : 0;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
deleted file mode 100644 (file)
index f0b5f83..0000000
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef XC_PRIVATE_H
-#define XC_PRIVATE_H
-
-#include <unistd.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <stdlib.h>
-#include <limits.h>
-#include <sys/ioctl.h>
-
-#include "_paths.h"
-
-#define XC_WANT_COMPAT_MAP_FOREIGN_API
-#define XC_INTERNAL_COMPAT_MAP_FOREIGN_API
-#include "xenctrl.h"
-
-#include <xencall.h>
-#include <xenforeignmemory.h>
-#include <xendevicemodel.h>
-
-#include <xen/sys/privcmd.h>
-
-#include <xen-tools/libs.h>
-
-#if defined(HAVE_VALGRIND_MEMCHECK_H) && !defined(NDEBUG) && !defined(__MINIOS__)
-/* Compile in Valgrind client requests? */
-#include <valgrind/memcheck.h>
-#else
-#define VALGRIND_MAKE_MEM_UNDEFINED(addr, len) /* addr, len */
-#endif
-
-#if defined(__MINIOS__)
-/*
- * MiniOS's libc doesn't know about sys/uio.h or writev().
- * Declare enough of sys/uio.h to compile.
- */
-struct iovec {
-    void *iov_base;
-    size_t iov_len;
-};
-#else
-#include <sys/uio.h>
-#endif
-
-#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
-
-#define GET_FIELD(_p, _f, _w) (((_w) == 8) ? ((_p)->x64._f) : ((_p)->x32._f))
-
-#define SET_FIELD(_p, _f, _v, _w) do {          \
-    if ((_w) == 8)                              \
-        (_p)->x64._f = (_v);                    \
-    else                                        \
-        (_p)->x32._f = (_v);                    \
-} while (0)
-
-/* XXX SMH: following skanky macros rely on variable p2m_size being set */
-/* XXX TJD: also, "guest_width" should be the guest's sizeof(unsigned long) */
-
-struct domain_info_context {
-    unsigned int guest_width;
-    unsigned long p2m_size;
-};
-
-/* Number of xen_pfn_t in a page */
-#define FPP             (PAGE_SIZE/(dinfo->guest_width))
-
-/* Number of entries in the pfn_to_mfn_frame_list_list */
-#define P2M_FLL_ENTRIES (((dinfo->p2m_size)+(FPP*FPP)-1)/(FPP*FPP))
-
-/* Number of entries in the pfn_to_mfn_frame_list */
-#define P2M_FL_ENTRIES  (((dinfo->p2m_size)+FPP-1)/FPP)
-
-/* Size in bytes of the pfn_to_mfn_frame_list     */
-#define P2M_GUEST_FL_SIZE ((P2M_FL_ENTRIES) * (dinfo->guest_width))
-#define P2M_TOOLS_FL_SIZE ((P2M_FL_ENTRIES) *                           \
-                           max_t(size_t, sizeof(xen_pfn_t), dinfo->guest_width))
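A worked example of the arithmetic (illustrative numbers only): with 4 KiB pages and guest_width = 8, FPP = 4096/8 = 512; a guest with p2m_size = 1048576 pfns therefore needs P2M_FL_ENTRIES = 2048 frame-list entries and P2M_FLL_ENTRIES = 4 frame-list-list entries, giving P2M_GUEST_FL_SIZE = 16 KiB.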
-
-#define DECLARE_DOMCTL struct xen_domctl domctl
-#define DECLARE_SYSCTL struct xen_sysctl sysctl
-#define DECLARE_PHYSDEV_OP struct physdev_op physdev_op
-#define DECLARE_FLASK_OP struct xen_flask_op op
-#define DECLARE_PLATFORM_OP struct xen_platform_op platform_op
-
-#undef PAGE_SHIFT
-#undef PAGE_SIZE
-#undef PAGE_MASK
-#define PAGE_SHIFT              XC_PAGE_SHIFT
-#define PAGE_SIZE               XC_PAGE_SIZE
-#define PAGE_MASK               XC_PAGE_MASK
-
-#define INVALID_PFN ((xen_pfn_t)-1)
-
-/*
- * Define the maximum dirty page cache to permit during save/restore -- need to
- * balance keeping cache usage down against the CPU impact of invalidating too
- * often.  (Currently 16MB.)
- */
-#define MAX_PAGECACHE_USAGE (4*1024)
-
-struct xc_interface_core {
-    int flags;
-    xentoollog_logger *error_handler,   *error_handler_tofree;
-    xentoollog_logger *dombuild_logger, *dombuild_logger_tofree;
-    struct xc_error last_error; /* for xc_get_last_error */
-    FILE *dombuild_logger_file;
-    const char *currently_progress_reporting;
-
-    /* Hypercall interface */
-    xencall_handle *xcall;
-
-    /* Foreign mappings */
-    xenforeignmemory_handle *fmem;
-
-    /* Device model */
-    xendevicemodel_handle *dmod;
-};
-
-void *osdep_alloc_hypercall_buffer(xc_interface *xch, int npages);
-void osdep_free_hypercall_buffer(xc_interface *xch, void *ptr, int npages);
-
-void xc_report_error(xc_interface *xch, int code, const char *fmt, ...)
-    __attribute__((format(printf,3,4)));
-void xc_reportv(xc_interface *xch, xentoollog_logger *lg, xentoollog_level,
-                int code, const char *fmt, va_list args)
-     __attribute__((format(printf,5,0)));
-void xc_report(xc_interface *xch, xentoollog_logger *lg, xentoollog_level,
-               int code, const char *fmt, ...)
-     __attribute__((format(printf,5,6)));
-
-const char *xc_set_progress_prefix(xc_interface *xch, const char *doing);
-void xc_report_progress_single(xc_interface *xch, const char *doing);
-void xc_report_progress_step(xc_interface *xch,
-                             unsigned long done, unsigned long total);
-
-/* anaphoric macros: struct xc_interface *xch must be in scope */
-
-#define IPRINTF(_f, _a...)  do { int IPRINTF_errno = errno; \
-        xc_report(xch, xch->error_handler, XTL_INFO,0, _f , ## _a); \
-        errno = IPRINTF_errno; \
-        } while (0)
-#define DPRINTF(_f, _a...) do { int DPRINTF_errno = errno; \
-        xc_report(xch, xch->error_handler, XTL_DETAIL,0, _f , ## _a); \
-        errno = DPRINTF_errno; \
-        } while (0)
-#define DBGPRINTF(_f, _a...)  do { int DBGPRINTF_errno = errno; \
-        xc_report(xch, xch->error_handler, XTL_DEBUG,0, _f , ## _a); \
-        errno = DBGPRINTF_errno; \
-        } while (0)
-
-#define ERROR(_m, _a...)  do { int ERROR_errno = errno; \
-        xc_report_error(xch,XC_INTERNAL_ERROR,_m , ## _a ); \
-        errno = ERROR_errno; \
-        } while (0)
-#define PERROR(_m, _a...) do { int PERROR_errno = errno; \
-        xc_report_error(xch,XC_INTERNAL_ERROR,_m " (%d = %s)", \
-        ## _a , errno, xc_strerror(xch, errno)); \
-        errno = PERROR_errno; \
-        } while (0)
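Typical use, with xch in scope as required: ERROR() records the message as the interface's last error, while PERROR() additionally appends the current errno and its xc_strerror() text. Illustrative only; fd and path are hypothetical local variables:

if ( fd < 0 )
    PERROR("Could not open %s", path); /* appends e.g. "(2 = No such file or directory)" */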
-
-/*
- * HYPERCALL ARGUMENT BUFFERS
- *
- * Augment the public hypercall buffer interface with the ability to
- * bounce between user provided buffers and hypercall safe memory.
- *
- * Use xc_hypercall_bounce_pre/post instead of
- * xc_hypercall_buffer_alloc/free(_pages).  The specified user
- * supplied buffer is automatically copied in/out of the hypercall
- * safe memory.
- */
-enum {
-    XC_HYPERCALL_BUFFER_BOUNCE_NONE = 0,
-    XC_HYPERCALL_BUFFER_BOUNCE_IN   = 1,
-    XC_HYPERCALL_BUFFER_BOUNCE_OUT  = 2,
-    XC_HYPERCALL_BUFFER_BOUNCE_BOTH = 3
-};
-
-/*
- * Declare a named bounce buffer.
- *
- * Normally you should use DECLARE_HYPERCALL_BOUNCE (see below).
- *
- * This declaration should only be used when the user pointer is
- * non-trivial, e.g. when it is contained within an existing data
- * structure.
- */
-#define DECLARE_NAMED_HYPERCALL_BOUNCE(_name, _ubuf, _sz, _dir) \
-    xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = {  \
-        .hbuf = NULL,                                           \
-        .param_shadow = NULL,                                   \
-        .sz = _sz, .dir = _dir, .ubuf = _ubuf,                  \
-    }
-
-/*
- * Declare a bounce buffer shadowing the named user data pointer.
- */
-#define DECLARE_HYPERCALL_BOUNCE(_ubuf, _sz, _dir) DECLARE_NAMED_HYPERCALL_BOUNCE(_ubuf, _ubuf, _sz, _dir)
-
-/*
- * Declare a bounce buffer shadowing the named user data pointer that
- * cannot be modified.
- */
-#define DECLARE_HYPERCALL_BOUNCE_IN(_ubuf, _sz)                     \
-    DECLARE_NAMED_HYPERCALL_BOUNCE(_ubuf, (void *)(_ubuf), _sz,     \
-                                   XC_HYPERCALL_BUFFER_BOUNCE_IN)
-
-/*
- * Set the size of data to bounce. Useful when the size is not known
- * when the bounce buffer is declared.
- */
-#define HYPERCALL_BOUNCE_SET_SIZE(_buf, _sz) do { (HYPERCALL_BUFFER(_buf))->sz = _sz; } while (0)
-
-/*
- * Change the direction.
- *
- * Can only be used if the bounce_pre/bounce_post commands have
- * not been used.
- */
-#define HYPERCALL_BOUNCE_SET_DIR(_buf, _dir) do { if ((HYPERCALL_BUFFER(_buf))->hbuf)         \
-                                                        assert(1);                            \
-                                                   (HYPERCALL_BUFFER(_buf))->dir = _dir;      \
-                                                } while (0)
-
-/*
- * Initialise and free hypercall safe memory. Takes care of any required
- * copying.
- */
-int xc__hypercall_bounce_pre(xc_interface *xch, xc_hypercall_buffer_t *bounce);
-#define xc_hypercall_bounce_pre(_xch, _name) xc__hypercall_bounce_pre(_xch, HYPERCALL_BUFFER(_name))
-void xc__hypercall_bounce_post(xc_interface *xch, xc_hypercall_buffer_t *bounce);
-#define xc_hypercall_bounce_post(_xch, _name) xc__hypercall_bounce_post(_xch, HYPERCALL_BUFFER(_name))
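The intended call pattern is: declare a bounce over the user buffer, bounce it into hypercall-safe memory before the call, and bounce it back out afterwards. A minimal sketch (hypothetical example, not taken from the library):

static inline int example_bounce(xc_interface *xch, uint8_t *buf, size_t len)
{
    int rc = 0;
    DECLARE_HYPERCALL_BOUNCE(buf, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( xc_hypercall_bounce_pre(xch, buf) )
        return -1;

    /* ... issue the hypercall, passing HYPERCALL_BUFFER_AS_ARG(buf) ... */

    xc_hypercall_bounce_post(xch, buf);
    return rc;
}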
-
-/*
- * Release hypercall buffer cache
- */
-void xc__hypercall_buffer_cache_release(xc_interface *xch);
-
-/*
- * Hypercall interfaces.
- */
-
-static inline int do_xen_version(xc_interface *xch, int cmd, xc_hypercall_buffer_t *dest)
-{
-    DECLARE_HYPERCALL_BUFFER_ARGUMENT(dest);
-    return xencall2(xch->xcall, __HYPERVISOR_xen_version,
-                    cmd, HYPERCALL_BUFFER_AS_ARG(dest));
-}
-
-static inline int do_physdev_op(xc_interface *xch, int cmd, void *op, size_t len)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BOUNCE(op, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( xc_hypercall_bounce_pre(xch, op) )
-    {
-        PERROR("Could not bounce memory for physdev hypercall");
-        goto out1;
-    }
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_physdev_op,
-                   cmd, HYPERCALL_BUFFER_AS_ARG(op));
-    if ( ret < 0 )
-    {
-        if ( errno == EACCES )
-            DPRINTF("physdev operation failed -- need to"
-                    " rebuild the user-space tool set?\n");
-    }
-
-    xc_hypercall_bounce_post(xch, op);
-out1:
-    return ret;
-}
-
-static inline int do_domctl_maybe_retry_efault(xc_interface *xch,
-                                               struct xen_domctl *domctl,
-                                               unsigned int retries)
-{
-    int ret = -1;
-    unsigned int retry_cnt = 0;
-
-    DECLARE_HYPERCALL_BOUNCE(domctl, sizeof(*domctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
-
-    if ( xc_hypercall_bounce_pre(xch, domctl) )
-    {
-        PERROR("Could not bounce buffer for domctl hypercall");
-        goto out1;
-    }
-
-    do {
-        ret = xencall1(xch->xcall, __HYPERVISOR_domctl,
-                       HYPERCALL_BUFFER_AS_ARG(domctl));
-    } while ( ret < 0 && errno == EFAULT && retry_cnt++ < retries );
-
-    if ( ret < 0 )
-    {
-        if ( errno == EACCES )
-            DPRINTF("domctl operation failed -- need to"
-                    " rebuild the user-space tool set?\n");
-    }
-
-    xc_hypercall_bounce_post(xch, domctl);
- out1:
-    return ret;
-}
-
-static inline int do_domctl(xc_interface *xch, struct xen_domctl *domctl)
-{
-    return do_domctl_maybe_retry_efault(xch, domctl, 0);
-}
-
-static inline int do_domctl_retry_efault(xc_interface *xch, struct xen_domctl *domctl)
-{
-    unsigned int retries = xencall_buffers_never_fault(xch->xcall) ? 0 : 2;
-
-    return do_domctl_maybe_retry_efault(xch, domctl, retries);
-}
-
-static inline int do_sysctl(xc_interface *xch, struct xen_sysctl *sysctl)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BOUNCE(sysctl, sizeof(*sysctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    sysctl->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
-
-    if ( xc_hypercall_bounce_pre(xch, sysctl) )
-    {
-        PERROR("Could not bounce buffer for sysctl hypercall");
-        goto out1;
-    }
-
-    ret = xencall1(xch->xcall, __HYPERVISOR_sysctl,
-                   HYPERCALL_BUFFER_AS_ARG(sysctl));
-    if ( ret < 0 )
-    {
-        if ( errno == EACCES )
-            DPRINTF("sysctl operation failed -- need to"
-                    " rebuild the user-space tool set?\n");
-    }
-
-    xc_hypercall_bounce_post(xch, sysctl);
- out1:
-    return ret;
-}
-
-static inline int do_platform_op(xc_interface *xch,
-                                 struct xen_platform_op *platform_op)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BOUNCE(platform_op, sizeof(*platform_op),
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    platform_op->interface_version = XENPF_INTERFACE_VERSION;
-
-    if ( xc_hypercall_bounce_pre(xch, platform_op) )
-    {
-        PERROR("Could not bounce buffer for platform_op hypercall");
-        return -1;
-    }
-
-    ret = xencall1(xch->xcall, __HYPERVISOR_platform_op,
-                   HYPERCALL_BUFFER_AS_ARG(platform_op));
-    if ( ret < 0 )
-    {
-        if ( errno == EACCES )
-            DPRINTF("platform operation failed -- need to"
-                    " rebuild the user-space tool set?\n");
-    }
-
-    xc_hypercall_bounce_post(xch, platform_op);
-    return ret;
-}
-
-static inline int do_multicall_op(xc_interface *xch,
-                                  xc_hypercall_buffer_t *call_list,
-                                  uint32_t nr_calls)
-{
-    int ret = -1;
-    DECLARE_HYPERCALL_BUFFER_ARGUMENT(call_list);
-
-    ret = xencall2(xch->xcall, __HYPERVISOR_multicall,
-                   HYPERCALL_BUFFER_AS_ARG(call_list), nr_calls);
-    if ( ret < 0 )
-    {
-        if ( errno == EACCES )
-            DPRINTF("multicall operation failed -- need to"
-                    " rebuild the user-space tool set?\n");
-    }
-
-    return ret;
-}
-
-long do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len);
-
-void *xc_map_foreign_ranges(xc_interface *xch, uint32_t dom,
-                            size_t size, int prot, size_t chunksize,
-                            privcmd_mmap_entry_t entries[], int nentries);
-
-int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
-                          unsigned int num, xen_pfn_t *);
-
-void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits);
-void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits);
-
-/* Optionally flush file to disk and discard page cache */
-void discard_file_cache(xc_interface *xch, int fd, int flush);
-
-#define MAX_MMU_UPDATES 1024
-struct xc_mmu {
-    mmu_update_t updates[MAX_MMU_UPDATES];
-    int          idx;
-    unsigned int subject;
-};
-/* Structure returned by xc_alloc_mmu_updates must be free()'ed by caller. */
-struct xc_mmu *xc_alloc_mmu_updates(xc_interface *xch, unsigned int subject);
-int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
-                   unsigned long long ptr, unsigned long long val);
-int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu);
-
-/* Return 0 on success; -1 on error setting errno. */
-int read_exact(int fd, void *data, size_t size); /* EOF => -1, errno=0 */
-int write_exact(int fd, const void *data, size_t size);
-int writev_exact(int fd, const struct iovec *iov, int iovcnt);
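The EOF convention lets callers tell a premature end-of-file from a genuine I/O error by inspecting errno (sketch, illustrative only; hdr is a hypothetical structure being read):

if ( read_exact(fd, &hdr, sizeof(hdr)) )
{
    if ( errno == 0 )
        ;   /* clean EOF before the full record arrived */
    else
        ;   /* read error, errno says why */
}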
-
-int xc_ffs8(uint8_t x);
-int xc_ffs16(uint16_t x);
-int xc_ffs32(uint32_t x);
-int xc_ffs64(uint64_t x);
-
-#define DOMPRINTF(fmt, args...) xc_dom_printf(dom->xch, fmt, ## args)
-#define DOMPRINTF_CALLED(xch) xc_dom_printf((xch), "%s: called", __FUNCTION__)
-
-/**
- * vm_event operations. Internal use only.
- */
-int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode, uint32_t *port);
-/*
- * Enables vm_event and returns the mapped ring page indicated by param.
- * param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
- */
-void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
-                         uint32_t *port);
-
-int do_dm_op(xc_interface *xch, uint32_t domid, unsigned int nr_bufs, ...);
-
-#endif /* XC_PRIVATE_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_psr.c b/tools/libxc/xc_psr.c
deleted file mode 100644 (file)
index 1a0ab63..0000000
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * xc_psr.c
- *
- * platform shared resource related API functions.
- *
- * Copyright (C) 2014      Intel Corporation
- * Author Dongxiao Xu <dongxiao.xu@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- */
-
-#include <assert.h>
-#include "xc_private.h"
-#include "xc_msr_x86.h"
-
-#define IA32_CMT_CTR_ERROR_MASK         (0x3ull << 62)
-
-#define EVTID_L3_OCCUPANCY             0x1
-#define EVTID_TOTAL_MEM_COUNT          0x2
-#define EVTID_LOCAL_MEM_COUNT          0x3
-
-int xc_psr_cmt_attach(xc_interface *xch, uint32_t domid)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_psr_cmt_op;
-    domctl.domain = domid;
-    domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_ATTACH;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_psr_cmt_detach(xc_interface *xch, uint32_t domid)
-{
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_psr_cmt_op;
-    domctl.domain = domid;
-    domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_DETACH;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_psr_cmt_get_domain_rmid(xc_interface *xch, uint32_t domid,
-                               uint32_t *rmid)
-{
-    int rc;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_psr_cmt_op;
-    domctl.domain = domid;
-    domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_QUERY_RMID;
-
-    rc = do_domctl(xch, &domctl);
-
-    if ( !rc )
-        *rmid = domctl.u.psr_cmt_op.data;
-
-    return rc;
-}
-
-int xc_psr_cmt_get_total_rmid(xc_interface *xch, uint32_t *total_rmid)
-{
-    static int val = 0;
-    int rc;
-    DECLARE_SYSCTL;
-
-    if ( val )
-    {
-        *total_rmid = val;
-        return 0;
-    }
-
-    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
-    sysctl.u.psr_cmt_op.cmd = XEN_SYSCTL_PSR_CMT_get_total_rmid;
-    sysctl.u.psr_cmt_op.flags = 0;
-
-    rc = xc_sysctl(xch, &sysctl);
-    if ( !rc )
-        val = *total_rmid = sysctl.u.psr_cmt_op.u.data;
-
-    return rc;
-}
-
-int xc_psr_cmt_get_l3_upscaling_factor(xc_interface *xch,
-                                       uint32_t *upscaling_factor)
-{
-    static int val = 0;
-    int rc;
-    DECLARE_SYSCTL;
-
-    if ( val )
-    {
-        *upscaling_factor = val;
-        return 0;
-    }
-
-    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
-    sysctl.u.psr_cmt_op.cmd =
-        XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor;
-    sysctl.u.psr_cmt_op.flags = 0;
-
-    rc = xc_sysctl(xch, &sysctl);
-    if ( !rc )
-        val = *upscaling_factor = sysctl.u.psr_cmt_op.u.data;
-
-    return rc;
-}
-
-int xc_psr_cmt_get_l3_event_mask(xc_interface *xch, uint32_t *event_mask)
-{
-    int rc;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
-    sysctl.u.psr_cmt_op.cmd =
-        XEN_SYSCTL_PSR_CMT_get_l3_event_mask;
-    sysctl.u.psr_cmt_op.flags = 0;
-
-    rc = xc_sysctl(xch, &sysctl);
-    if ( !rc )
-        *event_mask = sysctl.u.psr_cmt_op.u.data;
-
-    return rc;
-}
-
-int xc_psr_cmt_get_l3_cache_size(xc_interface *xch, uint32_t cpu,
-                                 uint32_t *l3_cache_size)
-{
-    static int val = 0;
-    int rc;
-    DECLARE_SYSCTL;
-
-    if ( val )
-    {
-        *l3_cache_size = val;
-        return 0;
-    }
-
-    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
-    sysctl.u.psr_cmt_op.cmd =
-        XEN_SYSCTL_PSR_CMT_get_l3_cache_size;
-    sysctl.u.psr_cmt_op.flags = 0;
-    sysctl.u.psr_cmt_op.u.l3_cache.cpu = cpu;
-
-    rc = xc_sysctl(xch, &sysctl);
-    if ( !rc )
-        val = *l3_cache_size = sysctl.u.psr_cmt_op.u.data;
-
-    return rc;
-}
-
-int xc_psr_cmt_get_data(xc_interface *xch, uint32_t rmid, uint32_t cpu,
-                        xc_psr_cmt_type type, uint64_t *monitor_data,
-                        uint64_t *tsc)
-{
-    xc_resource_op_t op;
-    xc_resource_entry_t entries[3];
-    xc_resource_entry_t *tsc_entry = NULL;
-    uint32_t evtid, nr = 0;
-    int rc;
-
-    switch ( type )
-    {
-    case XC_PSR_CMT_L3_OCCUPANCY:
-        evtid = EVTID_L3_OCCUPANCY;
-        break;
-    case XC_PSR_CMT_TOTAL_MEM_COUNT:
-        evtid = EVTID_TOTAL_MEM_COUNT;
-        break;
-    case XC_PSR_CMT_LOCAL_MEM_COUNT:
-        evtid = EVTID_LOCAL_MEM_COUNT;
-        break;
-    default:
-        return -1;
-    }
-
-    entries[nr].u.cmd = XEN_RESOURCE_OP_MSR_WRITE;
-    entries[nr].idx = MSR_IA32_CMT_EVTSEL;
-    entries[nr].val = (uint64_t)rmid << 32 | evtid;
-    entries[nr].rsvd = 0;
-    nr++;
-
-    entries[nr].u.cmd = XEN_RESOURCE_OP_MSR_READ;
-    entries[nr].idx = MSR_IA32_CMT_CTR;
-    entries[nr].val = 0;
-    entries[nr].rsvd = 0;
-    nr++;
-
-    if ( tsc != NULL )
-    {
-        tsc_entry = &entries[nr];
-        entries[nr].u.cmd = XEN_RESOURCE_OP_MSR_READ;
-        entries[nr].idx = MSR_IA32_TSC;
-        entries[nr].val = 0;
-        entries[nr].rsvd = 0;
-        nr++;
-    }
-
-    assert(nr <= ARRAY_SIZE(entries));
-
-    op.cpu = cpu;
-    op.nr_entries = nr;
-    op.entries = entries;
-
-    rc = xc_resource_op(xch, 1, &op);
-    if ( rc < 0 )
-        return rc;
-
-    if ( op.result != nr || entries[1].val & IA32_CMT_CTR_ERROR_MASK )
-        return -1;
-
-    *monitor_data = entries[1].val;
-
-    if ( tsc_entry != NULL )
-        *tsc = tsc_entry->val;
-
-    return 0;
-}
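Putting the CMT helpers together, a domain's L3 occupancy is read by looking up its RMID, sampling the counter on a CPU of the socket of interest, and scaling by the L3 upscaling factor. A hedged sketch (hypothetical caller code):

static int example_read_l3_occupancy(xc_interface *xch, uint32_t domid,
                                     uint32_t cpu, uint64_t *bytes)
{
    uint32_t rmid, factor;
    uint64_t data;

    if ( xc_psr_cmt_get_domain_rmid(xch, domid, &rmid) ||
         xc_psr_cmt_get_l3_upscaling_factor(xch, &factor) ||
         xc_psr_cmt_get_data(xch, rmid, cpu, XC_PSR_CMT_L3_OCCUPANCY,
                             &data, NULL) )
        return -1;

    *bytes = data * factor;
    return 0;
}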
-
-int xc_psr_cmt_enabled(xc_interface *xch)
-{
-    static int val = -1;
-    int rc;
-    DECLARE_SYSCTL;
-
-    if ( val >= 0 )
-        return val;
-
-    sysctl.cmd = XEN_SYSCTL_psr_cmt_op;
-    sysctl.u.psr_cmt_op.cmd = XEN_SYSCTL_PSR_CMT_enabled;
-    sysctl.u.psr_cmt_op.flags = 0;
-
-    rc = do_sysctl(xch, &sysctl);
-    if ( !rc )
-    {
-        val = sysctl.u.psr_cmt_op.u.data;
-        return val;
-    }
-
-    return 0;
-}
-
-int xc_psr_set_domain_data(xc_interface *xch, uint32_t domid,
-                           xc_psr_type type, uint32_t target,
-                           uint64_t data)
-{
-    DECLARE_DOMCTL;
-    uint32_t cmd;
-
-    switch ( type )
-    {
-    case XC_PSR_CAT_L3_CBM:
-        cmd = XEN_DOMCTL_PSR_SET_L3_CBM;
-        break;
-    case XC_PSR_CAT_L3_CBM_CODE:
-        cmd = XEN_DOMCTL_PSR_SET_L3_CODE;
-        break;
-    case XC_PSR_CAT_L3_CBM_DATA:
-        cmd = XEN_DOMCTL_PSR_SET_L3_DATA;
-        break;
-    case XC_PSR_CAT_L2_CBM:
-        cmd = XEN_DOMCTL_PSR_SET_L2_CBM;
-        break;
-    case XC_PSR_MBA_THRTL:
-        cmd = XEN_DOMCTL_PSR_SET_MBA_THRTL;
-        break;
-    default:
-        errno = EINVAL;
-        return -1;
-    }
-
-    domctl.cmd = XEN_DOMCTL_psr_alloc;
-    domctl.domain = domid;
-    domctl.u.psr_alloc.cmd = cmd;
-    domctl.u.psr_alloc.target = target;
-    domctl.u.psr_alloc.data = data;
-
-    return do_domctl(xch, &domctl);
-}
-
-int xc_psr_get_domain_data(xc_interface *xch, uint32_t domid,
-                           xc_psr_type type, uint32_t target,
-                           uint64_t *data)
-{
-    int rc;
-    DECLARE_DOMCTL;
-    uint32_t cmd;
-
-    switch ( type )
-    {
-    case XC_PSR_CAT_L3_CBM:
-        cmd = XEN_DOMCTL_PSR_GET_L3_CBM;
-        break;
-    case XC_PSR_CAT_L3_CBM_CODE:
-        cmd = XEN_DOMCTL_PSR_GET_L3_CODE;
-        break;
-    case XC_PSR_CAT_L3_CBM_DATA:
-        cmd = XEN_DOMCTL_PSR_GET_L3_DATA;
-        break;
-    case XC_PSR_CAT_L2_CBM:
-        cmd = XEN_DOMCTL_PSR_GET_L2_CBM;
-        break;
-    case XC_PSR_MBA_THRTL:
-        cmd = XEN_DOMCTL_PSR_GET_MBA_THRTL;
-        break;
-    default:
-        errno = EINVAL;
-        return -1;
-    }
-
-    domctl.cmd = XEN_DOMCTL_psr_alloc;
-    domctl.domain = domid;
-    domctl.u.psr_alloc.cmd = cmd;
-    domctl.u.psr_alloc.target = target;
-
-    rc = do_domctl(xch, &domctl);
-
-    if ( !rc )
-        *data = domctl.u.psr_alloc.data;
-
-    return rc;
-}
-
-int xc_psr_get_hw_info(xc_interface *xch, uint32_t socket,
-                       xc_psr_feat_type type, xc_psr_hw_info *hw_info)
-{
-    int rc = -1;
-    DECLARE_SYSCTL;
-
-    if ( !hw_info )
-    {
-        errno = EINVAL;
-        return rc;
-    }
-
-    sysctl.cmd = XEN_SYSCTL_psr_alloc;
-    sysctl.u.psr_alloc.target = socket;
-
-    switch ( type )
-    {
-    case XC_PSR_CAT_L2:
-    case XC_PSR_CAT_L3:
-        sysctl.u.psr_alloc.cmd = (type == XC_PSR_CAT_L2) ?
-                                 XEN_SYSCTL_PSR_get_l2_info :
-                                 XEN_SYSCTL_PSR_get_l3_info;
-
-        rc = xc_sysctl(xch, &sysctl);
-        if ( rc )
-            break;
-
-        hw_info->cat.cos_max = sysctl.u.psr_alloc.u.cat_info.cos_max;
-        hw_info->cat.cbm_len = sysctl.u.psr_alloc.u.cat_info.cbm_len;
-        hw_info->cat.cdp_enabled = (type == XC_PSR_CAT_L2) ?
-                                   false :
-                                   (sysctl.u.psr_alloc.u.cat_info.flags &
-                                    XEN_SYSCTL_PSR_CAT_L3_CDP);
-
-        break;
-    case XC_PSR_MBA:
-        sysctl.u.psr_alloc.cmd = XEN_SYSCTL_PSR_get_mba_info;
-        rc = xc_sysctl(xch, &sysctl);
-        if ( rc )
-            break;
-
-        hw_info->mba.cos_max = sysctl.u.psr_alloc.u.mba_info.cos_max;
-        hw_info->mba.thrtl_max = sysctl.u.psr_alloc.u.mba_info.thrtl_max;
-        hw_info->mba.linear = sysctl.u.psr_alloc.u.mba_info.flags &
-                              XEN_SYSCTL_PSR_MBA_LINEAR;
-
-        break;
-    default:
-        errno = EOPNOTSUPP;
-        break;
-    }
-
-    return rc;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_resource.c b/tools/libxc/xc_resource.c
deleted file mode 100644 (file)
index 3394cc1..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * xc_resource.c
- *
- * Generic resource access API
- *
- * Copyright (C) 2014      Intel Corporation
- * Author Dongxiao Xu <dongxiao.xu@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; version 2.1 only. with the special
- * exception on linking described in file LICENSE.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- */
-
-#include "xc_private.h"
-
-static int xc_resource_op_one(xc_interface *xch, xc_resource_op_t *op)
-{
-    int rc;
-    DECLARE_PLATFORM_OP;
-    DECLARE_NAMED_HYPERCALL_BOUNCE(entries, op->entries,
-                                op->nr_entries * sizeof(*op->entries),
-                                XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( xc_hypercall_bounce_pre(xch, entries) )
-        return -1;
-
-    platform_op.cmd = XENPF_resource_op;
-    platform_op.u.resource_op.nr_entries = op->nr_entries;
-    platform_op.u.resource_op.cpu = op->cpu;
-    set_xen_guest_handle(platform_op.u.resource_op.entries, entries);
-
-    rc = do_platform_op(xch, &platform_op);
-    op->result = rc;
-
-    xc_hypercall_bounce_post(xch, entries);
-
-    return rc;
-}
-
-static int xc_resource_op_multi(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops)
-{
-    int rc, i, entries_size;
-    xc_resource_op_t *op;
-    multicall_entry_t *call;
-    DECLARE_HYPERCALL_BUFFER(multicall_entry_t, call_list);
-    xc_hypercall_buffer_array_t *platform_ops, *entries_list = NULL;
-
-    call_list = xc_hypercall_buffer_alloc(xch, call_list,
-                                          sizeof(*call_list) * nr_ops);
-    if ( !call_list )
-        return -1;
-
-    platform_ops = xc_hypercall_buffer_array_create(xch, nr_ops);
-    if ( !platform_ops )
-    {
-        rc = -1;
-        goto out;
-    }
-
-    entries_list = xc_hypercall_buffer_array_create(xch, nr_ops);
-    if ( !entries_list )
-    {
-        rc = -1;
-        goto out;
-    }
-
-    for ( i = 0; i < nr_ops; i++ )
-    {
-        DECLARE_HYPERCALL_BUFFER(xen_platform_op_t, platform_op);
-        DECLARE_HYPERCALL_BUFFER(xc_resource_entry_t, entries);
-
-        op = ops + i;
-
-        platform_op = xc_hypercall_buffer_array_alloc(xch, platform_ops, i,
-                        platform_op, sizeof(xen_platform_op_t));
-        if ( !platform_op )
-        {
-            rc = -1;
-            goto out;
-        }
-
-        entries_size = sizeof(xc_resource_entry_t) * op->nr_entries;
-        entries = xc_hypercall_buffer_array_alloc(xch, entries_list, i,
-                   entries, entries_size);
-        if ( !entries )
-        {
-            rc = -1;
-            goto out;
-        }
-        memcpy(entries, op->entries, entries_size);
-
-        call = call_list + i;
-        call->op = __HYPERVISOR_platform_op;
-        call->args[0] = HYPERCALL_BUFFER_AS_ARG(platform_op);
-
-        platform_op->interface_version = XENPF_INTERFACE_VERSION;
-        platform_op->cmd = XENPF_resource_op;
-        platform_op->u.resource_op.cpu = op->cpu;
-        platform_op->u.resource_op.nr_entries = op->nr_entries;
-        set_xen_guest_handle(platform_op->u.resource_op.entries, entries);
-    }
-
-    rc = do_multicall_op(xch, HYPERCALL_BUFFER(call_list), nr_ops);
-
-    for ( i = 0; i < nr_ops; i++ )
-    {
-        DECLARE_HYPERCALL_BUFFER(xc_resource_entry_t, entries);
-        op = ops + i;
-
-        call = call_list + i;
-        op->result = call->result;
-
-        entries_size = sizeof(xc_resource_entry_t) * op->nr_entries;
-        entries = xc_hypercall_buffer_array_get(xch, entries_list, i,
-                   entries, entries_size);
-        memcpy(op->entries, entries, entries_size);
-    }
-
-out:
-    xc_hypercall_buffer_array_destroy(xch, entries_list);
-    xc_hypercall_buffer_array_destroy(xch, platform_ops);
-    xc_hypercall_buffer_free(xch, call_list);
-    return rc;
-}
-
-int xc_resource_op(xc_interface *xch, uint32_t nr_ops, xc_resource_op_t *ops)
-{
-    if ( nr_ops == 1 )
-        return xc_resource_op_one(xch, ops);
-
-    if ( nr_ops > 1 )
-        return xc_resource_op_multi(xch, nr_ops, ops);
-
-    return -1;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
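
Editorial aside, not part of the diff: xc_resource_op() dispatches to the single-op path for
nr_ops == 1 and to the multicall path otherwise; either way each op carries its own result back.
A hedged sketch of a caller, assuming the ops[] array (including each entries buffer, whose layout
lives in xenctrl.h and is not shown in this hunk) has already been filled in:

    /* Illustrative sketch only. */
    #include <stdio.h>
    #include <xenctrl.h>

    static int run_resource_ops(xc_interface *xch, xc_resource_op_t *ops,
                                uint32_t nr_ops)
    {
        uint32_t i;

        if ( xc_resource_op(xch, nr_ops, ops) < 0 )
            return -1;                       /* hypercall plumbing failed */

        for ( i = 0; i < nr_ops; i++ )       /* per-op result from Xen */
            fprintf(stderr, "op %u: cpu %u result %ld\n",
                    i, ops[i].cpu, (long)ops[i].result);

        return 0;
    }
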
diff --git a/tools/libxc/xc_resume.c b/tools/libxc/xc_resume.c
deleted file mode 100644 (file)
index 94c6c9f..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-#if defined(__i386__) || defined(__x86_64__)
-
-#include <xen/foreign/x86_32.h>
-#include <xen/foreign/x86_64.h>
-#include <xen/hvm/params.h>
-
-static int modify_returncode(xc_interface *xch, uint32_t domid)
-{
-    vcpu_guest_context_any_t ctxt;
-    xc_dominfo_t info;
-    xen_capabilities_info_t caps;
-    struct domain_info_context _dinfo = {};
-    struct domain_info_context *dinfo = &_dinfo;
-    int rc;
-
-    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
-         info.domid != domid )
-    {
-        PERROR("Could not get domain info");
-        return -1;
-    }
-
-    if ( !info.shutdown || (info.shutdown_reason != SHUTDOWN_suspend) )
-    {
-        ERROR("Dom %d not suspended: (shutdown %d, reason %d)", domid,
-              info.shutdown, info.shutdown_reason);
-        errno = EINVAL;
-        return -1;
-    }
-
-    if ( info.hvm )
-    {
-        /* HVM guests without PV drivers have no return code to modify. */
-        uint64_t irq = 0;
-        xc_hvm_param_get(xch, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
-        if ( !irq )
-            return 0;
-
-        /* HVM guests have host address width. */
-        if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
-        {
-            PERROR("Could not get Xen capabilities");
-            return -1;
-        }
-        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
-    }
-    else
-    {
-        /* Probe PV guest address width. */
-        if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) )
-            return -1;
-    }
-
-    if ( (rc = xc_vcpu_getcontext(xch, domid, 0, &ctxt)) != 0 )
-        return rc;
-
-    SET_FIELD(&ctxt, user_regs.eax, 1, dinfo->guest_width);
-
-    if ( (rc = xc_vcpu_setcontext(xch, domid, 0, &ctxt)) != 0 )
-        return rc;
-
-    return 0;
-}
-
-#else
-
-static int modify_returncode(xc_interface *xch, uint32_t domid)
-{
-    return 0;
-}
-
-#endif
-
-static int xc_domain_resume_cooperative(xc_interface *xch, uint32_t domid)
-{
-    DECLARE_DOMCTL;
-    int rc;
-
-    /*
-     * Set hypercall return code to indicate that suspend is cancelled
-     * (rather than resuming in a new domain context).
-     */
-    if ( (rc = modify_returncode(xch, domid)) != 0 )
-        return rc;
-
-    domctl.cmd = XEN_DOMCTL_resumedomain;
-    domctl.domain = domid;
-    return do_domctl(xch, &domctl);
-}
-
-#if defined(__i386__) || defined(__x86_64__)
-static int xc_domain_resume_hvm(xc_interface *xch, uint32_t domid)
-{
-    DECLARE_DOMCTL;
-
-    /*
-     * The domctl XEN_DOMCTL_resumedomain unpauses each vcpu. After
-     * the domctl, the guest will run.
-     *
-     * If it is PVHVM, the guest called the hypercall
-     *    SCHEDOP_shutdown:SHUTDOWN_suspend
-     * to suspend itself. We don't modify the return code, so the PV drivers
-     * will disconnect and reconnect.
-     *
-     * If it is a plain HVM guest, it will simply continue running.
-     */
-    domctl.cmd = XEN_DOMCTL_resumedomain;
-    domctl.domain = domid;
-    return do_domctl(xch, &domctl);
-}
-#endif
-
-static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
-{
-    DECLARE_DOMCTL;
-    xc_dominfo_t info;
-    int i, rc = -1;
-#if defined(__i386__) || defined(__x86_64__)
-    struct domain_info_context _dinfo = { .guest_width = 0,
-                                          .p2m_size = 0 };
-    struct domain_info_context *dinfo = &_dinfo;
-    unsigned long mfn;
-    vcpu_guest_context_any_t ctxt;
-    start_info_t *start_info;
-    shared_info_t *shinfo = NULL;
-    xen_pfn_t *p2m_frame_list_list = NULL;
-    xen_pfn_t *p2m_frame_list = NULL;
-    xen_pfn_t *p2m = NULL;
-#endif
-
-    if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 )
-    {
-        PERROR("Could not get domain info");
-        return rc;
-    }
-
-    /*
-     * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
-     */
-#if defined(__i386__) || defined(__x86_64__)
-    if ( info.hvm )
-        return xc_domain_resume_hvm(xch, domid);
-
-    if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
-    {
-        PERROR("Could not get domain width");
-        return rc;
-    }
-    if ( dinfo->guest_width != sizeof(long) )
-    {
-        ERROR("Cannot resume uncooperative cross-address-size guests");
-        return rc;
-    }
-
-    /* Map the shared info frame */
-    shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
-                                  PROT_READ, info.shared_info_frame);
-    if ( shinfo == NULL )
-    {
-        ERROR("Couldn't map shared info");
-        goto out;
-    }
-
-    dinfo->p2m_size = shinfo->arch.max_pfn;
-
-    p2m_frame_list_list =
-        xc_map_foreign_range(xch, domid, PAGE_SIZE, PROT_READ,
-                             shinfo->arch.pfn_to_mfn_frame_list_list);
-    if ( p2m_frame_list_list == NULL )
-    {
-        ERROR("Couldn't map p2m_frame_list_list");
-        goto out;
-    }
-
-    p2m_frame_list = xc_map_foreign_pages(xch, domid, PROT_READ,
-                                          p2m_frame_list_list,
-                                          P2M_FLL_ENTRIES);
-    if ( p2m_frame_list == NULL )
-    {
-        ERROR("Couldn't map p2m_frame_list");
-        goto out;
-    }
-
-    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
-       the guest must not change which frames are used for this purpose.
-       (It's not clear why it would want to change them, and we'll be OK
-       from a safety POV anyhow.) */
-    p2m = xc_map_foreign_pages(xch, domid, PROT_READ,
-                               p2m_frame_list,
-                               P2M_FL_ENTRIES);
-    if ( p2m == NULL )
-    {
-        ERROR("Couldn't map p2m table");
-        goto out;
-    }
-
-    if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
-    {
-        ERROR("Could not get vcpu context");
-        goto out;
-    }
-
-    mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);
-
-    start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
-                                      PROT_READ | PROT_WRITE, mfn);
-    if ( start_info == NULL )
-    {
-        ERROR("Couldn't map start_info");
-        goto out;
-    }
-
-    start_info->store_mfn        = p2m[start_info->store_mfn];
-    start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];
-
-    munmap(start_info, PAGE_SIZE);
-#endif /* defined(__i386__) || defined(__x86_64__) */
-
-    /* Reset all secondary CPU states. */
-    for ( i = 1; i <= info.max_vcpu_id; i++ )
-        if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
-        {
-            ERROR("Couldn't reset vcpu state");
-            goto out;
-        }
-
-    /* Ready to resume domain execution now. */
-    domctl.cmd = XEN_DOMCTL_resumedomain;
-    domctl.domain = domid;
-    rc = do_domctl(xch, &domctl);
-
-out:
-#if defined(__i386__) || defined(__x86_64__)
-    if (p2m)
-        munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
-    if (p2m_frame_list)
-        munmap(p2m_frame_list, P2M_FLL_ENTRIES*PAGE_SIZE);
-    if (p2m_frame_list_list)
-        munmap(p2m_frame_list_list, PAGE_SIZE);
-    if (shinfo)
-        munmap(shinfo, PAGE_SIZE);
-#endif
-
-    return rc;
-}
-
-/*
- * Resume execution of a domain after suspend shutdown.
- * This can happen in one of two ways:
- *  1. (fast=1) Resume the guest without resetting the domain environment.
- *     The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend) will return 1.
- *
- *  2. (fast=0) Reset the guest environment so it believes it is resumed in a
- *     new domain context. The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend)
- *     will return 0.
- *
- * (1) should only be used for guests which can handle the special return
- * code. Also note that inserting the return code writes vCPU 0's registers,
- * so the guest MUST be paused - otherwise we would be corrupting the guest
- * vCPU state.
- *
- * (2) should be used only for guests which cannot handle the special
- * new return code - and it is always safe (but slower).
- */
-int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
-{
-    return (fast
-            ? xc_domain_resume_cooperative(xch, domid)
-            : xc_domain_resume_any(xch, domid));
-}
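
Editorial aside, not part of the diff: per the comment above, fast=1 is only safe for guests that
understand the special return code, while fast=0 is always safe but slower. A sketch of the
conservative call a toolstack might make:

    /* Illustrative sketch: resume a suspended domain the safe way. */
    #include <stdio.h>
    #include <xenctrl.h>

    static int resume_safely(xc_interface *xch, uint32_t domid)
    {
        if ( xc_domain_resume(xch, domid, 0 /* fast */) != 0 )
        {
            perror("xc_domain_resume");
            return -1;
        }
        return 0;
    }
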
diff --git a/tools/libxc/xc_rt.c b/tools/libxc/xc_rt.c
deleted file mode 100644 (file)
index ad257c6..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-/****************************************************************************
- *
- *        File: xc_rt.c
- *      Author: Sisu Xi
- *              Meng Xu
- *
- * Description: XC Interface to the rtds scheduler
- * Note: a VCPU's parameters (period, budget) are in microseconds (us).
- *       All VCPUs of the same domain have the same period and budget.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-int xc_sched_rtds_domain_set(xc_interface *xch,
-                           uint32_t domid,
-                           struct xen_domctl_sched_rtds *sdom)
-{
-    int rc;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_scheduler_op;
-    domctl.domain = domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
-    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
-    domctl.u.scheduler_op.u.rtds.period = sdom->period;
-    domctl.u.scheduler_op.u.rtds.budget = sdom->budget;
-
-    rc = do_domctl(xch, &domctl);
-
-    return rc;
-}
-
-int xc_sched_rtds_domain_get(xc_interface *xch,
-                           uint32_t domid,
-                           struct xen_domctl_sched_rtds *sdom)
-{
-    int rc;
-    DECLARE_DOMCTL;
-
-    domctl.cmd = XEN_DOMCTL_scheduler_op;
-    domctl.domain = domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
-    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
-
-    rc = do_domctl(xch, &domctl);
-
-    if ( rc == 0 )
-        *sdom = domctl.u.scheduler_op.u.rtds;
-
-    return rc;
-}
-
-int xc_sched_rtds_vcpu_set(xc_interface *xch,
-                           uint32_t domid,
-                           struct xen_domctl_schedparam_vcpu *vcpus,
-                           uint32_t num_vcpus)
-{
-    int rc = 0;
-    unsigned processed = 0;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * num_vcpus,
-                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
-
-    if ( xc_hypercall_bounce_pre(xch, vcpus) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_scheduler_op;
-    domctl.domain = domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
-    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putvcpuinfo;
-
-    while ( processed < num_vcpus )
-    {
-        domctl.u.scheduler_op.u.v.nr_vcpus = num_vcpus - processed;
-        set_xen_guest_handle_offset(domctl.u.scheduler_op.u.v.vcpus, vcpus,
-                                    processed);
-        if ( (rc = do_domctl(xch, &domctl)) != 0 )
-            break;
-        processed += domctl.u.scheduler_op.u.v.nr_vcpus;
-    }
-
-    xc_hypercall_bounce_post(xch, vcpus);
-
-    return rc;
-}
-
-int xc_sched_rtds_vcpu_get(xc_interface *xch,
-                           uint32_t domid,
-                           struct xen_domctl_schedparam_vcpu *vcpus,
-                           uint32_t num_vcpus)
-{
-    int rc = 0;
-    unsigned processed = 0;
-    DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * num_vcpus,
-                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
-
-    if ( xc_hypercall_bounce_pre(xch, vcpus) )
-        return -1;
-
-    domctl.cmd = XEN_DOMCTL_scheduler_op;
-    domctl.domain = domid;
-    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
-    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
-
-    while ( processed < num_vcpus )
-    {
-        domctl.u.scheduler_op.u.v.nr_vcpus = num_vcpus - processed;
-        set_xen_guest_handle_offset(domctl.u.scheduler_op.u.v.vcpus, vcpus,
-                                    processed);
-        if ( (rc = do_domctl(xch, &domctl)) != 0 )
-            break;
-        processed += domctl.u.scheduler_op.u.v.nr_vcpus;
-    }
-
-    xc_hypercall_bounce_post(xch, vcpus);
-
-    return rc;
-}
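
Editorial aside, not part of the diff: the RTDS setters above take period and budget in
microseconds and the domain-wide call applies them to every vCPU. A hedged sketch giving a domain
a 10ms period with a 4ms budget:

    /* Illustrative sketch: 10000us period, 4000us budget for domid. */
    #include <xenctrl.h>

    static int set_rtds_params(xc_interface *xch, uint32_t domid)
    {
        struct xen_domctl_sched_rtds sdom = {
            .period = 10000,
            .budget = 4000,
        };

        return xc_sched_rtds_domain_set(xch, domid, &sdom);
    }
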
diff --git a/tools/libxc/xc_solaris.c b/tools/libxc/xc_solaris.c
deleted file mode 100644 (file)
index 5128f3f..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/******************************************************************************
- *
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-#include <malloc.h>
-
-/* Optionally flush file to disk and discard page cache */
-void discard_file_cache(xc_interface *xch, int fd, int flush)
-{
-    // TODO: Implement for Solaris!
-}
-
-void *xc_memalign(xc_interface *xch, size_t alignment, size_t size)
-{
-    return memalign(alignment, size);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/tools/libxc/xc_tbuf.c b/tools/libxc/xc_tbuf.c
deleted file mode 100644 (file)
index 283fbd1..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/******************************************************************************
- * xc_tbuf.c
- *
- * API for manipulating and accessing trace buffer parameters
- *
- * Copyright (c) 2005, Rob Gardner
- *
- * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-#include <xen/trace.h>
-
-static int tbuf_enable(xc_interface *xch, int enable)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_tbuf_op;
-    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
-    if ( enable )
-        sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_enable;
-    else
-        sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_disable;
-
-    return xc_sysctl(xch, &sysctl);
-}
-
-int xc_tbuf_set_size(xc_interface *xch, unsigned long size)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_tbuf_op;
-    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
-    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_size;
-    sysctl.u.tbuf_op.size = size;
-
-    return xc_sysctl(xch, &sysctl);
-}
-
-int xc_tbuf_get_size(xc_interface *xch, unsigned long *size)
-{
-    struct t_info *t_info;
-    int rc;
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_tbuf_op;
-    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
-    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_get_info;
-
-    rc = xc_sysctl(xch, &sysctl);
-    if ( rc != 0 )
-        return rc;
-
-    t_info = xc_map_foreign_range(xch, DOMID_XEN,
-                    sysctl.u.tbuf_op.size, PROT_READ | PROT_WRITE,
-                    sysctl.u.tbuf_op.buffer_mfn);
-
-    if ( t_info == NULL || t_info->tbuf_size == 0 )
-        rc = -1;
-    else
-        *size = t_info->tbuf_size;
-
-    xenforeignmemory_unmap(xch->fmem, t_info, sysctl.u.tbuf_op.size);
-
-    return rc;
-}
-
-int xc_tbuf_enable(xc_interface *xch, unsigned long pages, unsigned long *mfn,
-                   unsigned long *size)
-{
-    DECLARE_SYSCTL;
-    int rc;
-
-    /*
-     * Ignore errors (at least for now) as we get an error if size is already
-     * set (since trace buffers cannot be reallocated). If we really have no
-     * buffers at all then tbuf_enable() will fail, so this is safe.
-     */
-    (void)xc_tbuf_set_size(xch, pages);
-
-    if ( tbuf_enable(xch, 1) != 0 )
-        return -1;
-
-    sysctl.cmd = XEN_SYSCTL_tbuf_op;
-    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
-    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_get_info;
-
-    rc = xc_sysctl(xch, &sysctl);
-    if ( rc == 0 )
-    {
-        *size = sysctl.u.tbuf_op.size;
-        *mfn = sysctl.u.tbuf_op.buffer_mfn;
-    }
-
-    return 0;
-}
-
-int xc_tbuf_disable(xc_interface *xch)
-{
-    return tbuf_enable(xch, 0);
-}
-
-int xc_tbuf_set_cpu_mask(xc_interface *xch, xc_cpumap_t mask)
-{
-    DECLARE_SYSCTL;
-    DECLARE_HYPERCALL_BOUNCE(mask, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
-    int ret = -1;
-    int bits, cpusize;
-
-    cpusize = xc_get_cpumap_size(xch);
-    if (cpusize <= 0)
-    {
-        PERROR("Could not get number of cpus");
-        return -1;
-    }
-
-    HYPERCALL_BOUNCE_SET_SIZE(mask, cpusize);
-
-    bits = xc_get_max_cpus(xch);
-    if (bits <= 0)
-    {
-        PERROR("Could not get number of bits");
-        return -1;
-    }
-
-    if ( xc_hypercall_bounce_pre(xch, mask) )
-    {
-        PERROR("Could not allocate memory for xc_tbuf_set_cpu_mask hypercall");
-        goto out;
-    }
-
-    sysctl.cmd = XEN_SYSCTL_tbuf_op;
-    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
-    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_cpu_mask;
-
-    set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, mask);
-    sysctl.u.tbuf_op.cpu_mask.nr_bits = bits;
-
-    ret = do_sysctl(xch, &sysctl);
-
-    xc_hypercall_bounce_post(xch, mask);
-
- out:
-    return ret;
-}
-
-int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask)
-{
-    DECLARE_SYSCTL;
-
-    sysctl.cmd = XEN_SYSCTL_tbuf_op;
-    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
-    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_evt_mask;
-    sysctl.u.tbuf_op.evt_mask = mask;
-
-    return do_sysctl(xch, &sysctl);
-}
-
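
Editorial aside, not part of the diff: xc_tbuf_enable() above sizes the trace buffers (errors from
the set_size call are deliberately ignored, see its comment), switches tracing on and hands the
buffer MFN and size back to the caller. A minimal sketch of its use:

    /* Illustrative sketch: enable tracing with 32 pages of trace buffer. */
    #include <xenctrl.h>

    static int start_tracing(xc_interface *xch)
    {
        unsigned long mfn = 0, size = 0;

        if ( xc_tbuf_enable(xch, 32, &mfn, &size) != 0 )
            return -1;

        /* mfn/size would then be handed to a consumer such as xentrace. */
        return 0;
    }
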
diff --git a/tools/libxc/xc_vm_event.c b/tools/libxc/xc_vm_event.c
deleted file mode 100644 (file)
index a97c615..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-/******************************************************************************
- *
- * xc_vm_event.c
- *
- * Interface to low-level memory event functionality.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "xc_private.h"
-
-int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode, uint32_t *port)
-{
-    DECLARE_DOMCTL;
-    int rc;
-
-    domctl.cmd = XEN_DOMCTL_vm_event_op;
-    domctl.domain = domain_id;
-    domctl.u.vm_event_op.op = op;
-    domctl.u.vm_event_op.mode = mode;
-
-    rc = do_domctl(xch, &domctl);
-    if ( !rc && port )
-        *port = domctl.u.vm_event_op.u.enable.port;
-    return rc;
-}
-
-void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
-                         uint32_t *port)
-{
-    void *ring_page = NULL;
-    uint64_t pfn;
-    xen_pfn_t ring_pfn, mmap_pfn;
-    unsigned int op, mode;
-    int rc1, rc2, saved_errno;
-
-    if ( !port )
-    {
-        errno = EINVAL;
-        return NULL;
-    }
-
-    /* Pause the domain for ring page setup */
-    rc1 = xc_domain_pause(xch, domain_id);
-    if ( rc1 != 0 )
-    {
-        PERROR("Unable to pause domain\n");
-        return NULL;
-    }
-
-    /* Get the pfn of the ring page */
-    rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
-    if ( rc1 != 0 )
-    {
-        PERROR("Failed to get pfn of ring page\n");
-        goto out;
-    }
-
-    ring_pfn = pfn;
-    mmap_pfn = pfn;
-    rc1 = xc_get_pfn_type_batch(xch, domain_id, 1, &mmap_pfn);
-    if ( rc1 || mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
-    {
-        /* Page not in the physmap, try to populate it */
-        rc1 = xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0,
-                                              &ring_pfn);
-        if ( rc1 != 0 )
-        {
-            PERROR("Failed to populate ring pfn\n");
-            goto out;
-        }
-    }
-
-    mmap_pfn = ring_pfn;
-    ring_page = xc_map_foreign_pages(xch, domain_id, PROT_READ | PROT_WRITE,
-                                         &mmap_pfn, 1);
-    if ( !ring_page )
-    {
-        PERROR("Could not map the ring page\n");
-        goto out;
-    }
-
-    switch ( param )
-    {
-    case HVM_PARAM_PAGING_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_PAGING;
-        break;
-
-    case HVM_PARAM_MONITOR_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
-        break;
-
-    case HVM_PARAM_SHARING_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_SHARING;
-        break;
-
-    /*
-     * This covers the outside chance that the HVM_PARAM is valid in general
-     * but is not a valid vm_event ring parameter.
-     */
-    default:
-        errno = EINVAL;
-        rc1 = -1;
-        goto out;
-    }
-
-    rc1 = xc_vm_event_control(xch, domain_id, op, mode, port);
-    if ( rc1 != 0 )
-    {
-        PERROR("Failed to enable vm_event\n");
-        goto out;
-    }
-
-    /* Remove the ring_pfn from the guest's physmap */
-    rc1 = xc_domain_decrease_reservation_exact(xch, domain_id, 1, 0, &ring_pfn);
-    if ( rc1 != 0 )
-        PERROR("Failed to remove ring page from guest physmap");
-
- out:
-    saved_errno = errno;
-
-    rc2 = xc_domain_unpause(xch, domain_id);
-    if ( rc1 != 0 || rc2 != 0 )
-    {
-        if ( rc2 != 0 )
-        {
-            if ( rc1 == 0 )
-                saved_errno = errno;
-            PERROR("Unable to unpause domain");
-        }
-
-        if ( ring_page )
-            xenforeignmemory_unmap(xch->fmem, ring_page, 1);
-        ring_page = NULL;
-
-        errno = saved_errno;
-    }
-
-    return ring_page;
-}
-
-int xc_vm_event_get_version(xc_interface *xch)
-{
-    DECLARE_DOMCTL;
-    int rc;
-
-    domctl.cmd = XEN_DOMCTL_vm_event_op;
-    domctl.domain = DOMID_INVALID;
-    domctl.u.vm_event_op.op = XEN_VM_EVENT_GET_VERSION;
-    domctl.u.vm_event_op.mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
-
-    rc = do_domctl(xch, &domctl);
-    if ( !rc )
-        rc = domctl.u.vm_event_op.u.version;
-    return rc;
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
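
Editorial aside, not part of the diff: xc_vm_event_enable() above performs the whole ring setup
(pause, look up or populate the ring gfn, map it, issue XEN_VM_EVENT_ENABLE, drop the page from
the physmap, unpause) and returns the mapped ring page plus the event channel port. A hedged
sketch of a monitor client's call, which still has to bind the port and initialise the ring
itself:

    /* Illustrative sketch: set up the monitor ring for domid. */
    #include <stdio.h>
    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    static void *enable_monitor(xc_interface *xch, uint32_t domid,
                                uint32_t *port)
    {
        void *ring = xc_vm_event_enable(xch, domid,
                                        HVM_PARAM_MONITOR_RING_PFN, port);

        if ( ring == NULL )
            perror("xc_vm_event_enable");

        return ring;
    }
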
index 692a62584e88843697fe085d7714f87df21f4603..89ec3ec76a33a92839f5a9562545ad5bf6f3076e 100644 (file)
@@ -69,7 +69,7 @@ XENSTOREDLIBS = \
        -ccopt -L -ccopt $(OCAML_TOPLEVEL)/libs/eventchn $(OCAML_TOPLEVEL)/libs/eventchn/xeneventchn.cmxa \
        -ccopt -L -ccopt $(OCAML_TOPLEVEL)/libs/xc $(OCAML_TOPLEVEL)/libs/xc/xenctrl.cmxa \
        -ccopt -L -ccopt $(OCAML_TOPLEVEL)/libs/xb $(OCAML_TOPLEVEL)/libs/xb/xenbus.cmxa \
-       -ccopt -L -ccopt $(XEN_ROOT)/tools/libxc
+       -ccopt -L -ccopt $(XEN_ROOT)/tools/libs/ctrl
 
 PROGRAMS = oxenstored
 
index 8d22c03676571eb3d925178d24d3c0408bfed966..8dc755d6e8456f0f89c5781e118f4da50c1dc64a 100644 (file)
@@ -33,7 +33,7 @@ uninstall:
 
 .PHONY: test
 test:
-       LD_LIBRARY_PATH=$$(readlink -f ../libxc):$$(readlink -f ../xenstore) $(PYTHON) -m unittest discover
+       LD_LIBRARY_PATH=$$(readlink -f ../libs/ctrl):$$(readlink -f ../xenstore) $(PYTHON) -m unittest discover
 
 .PHONY: clean
 clean:
index 44696b3998ebd126c2682c81310abb224e50cac3..24b284af3929e31783315cad76af30996af5ad38 100644 (file)
@@ -9,7 +9,7 @@ extra_compile_args  = [ "-fno-strict-aliasing", "-Werror" ]
 PATH_XEN      = XEN_ROOT + "/tools/include"
 PATH_LIBXENTOOLLOG = XEN_ROOT + "/tools/libs/toollog"
 PATH_LIBXENEVTCHN = XEN_ROOT + "/tools/libs/evtchn"
-PATH_LIBXC    = XEN_ROOT + "/tools/libxc"
+PATH_LIBXENCTRL = XEN_ROOT + "/tools/libs/ctrl"
 PATH_LIBXL    = XEN_ROOT + "/tools/libxl"
 PATH_XENSTORE = XEN_ROOT + "/tools/xenstore"
 
@@ -18,11 +18,11 @@ xc = Extension("xc",
                include_dirs       = [ PATH_XEN,
                                       PATH_LIBXENTOOLLOG + "/include",
                                       PATH_LIBXENEVTCHN + "/include",
-                                      PATH_LIBXC + "/include",
+                                      PATH_LIBXENCTRL + "/include",
                                       "xen/lowlevel/xc" ],
-               library_dirs       = [ PATH_LIBXC ],
+               library_dirs       = [ PATH_LIBXENCTRL ],
                libraries          = [ "xenctrl" ],
-               depends            = [ PATH_LIBXC + "/libxenctrl.so" ],
+               depends            = [ PATH_LIBXENCTRL + "/libxenctrl.so" ],
                extra_link_args    = [ "-Wl,-rpath-link="+PATH_LIBXENTOOLLOG ],
                sources            = [ "xen/lowlevel/xc/xc.c" ])