copy_file(working, 'public', '.', 'sched.h')
copy_file(working, 'public', '.', 'event_channel.h')
copy_file(working, 'public', '.', 'grant_table.h')
+ copy_file(working, 'public', '.', 'physdev.h')
copy_file(working, 'public\\arch-x86', 'arch-x86', 'xen.h')
copy_file(working, 'public\\arch-x86', 'arch-x86', 'xen-x86_32.h')
#define __HYPERVISOR_VIRT_START_PAE 0xF5800000
#define __MACH2PHYS_VIRT_START_PAE 0xF5800000
#define __MACH2PHYS_VIRT_END_PAE 0xF6800000
-#define HYPERVISOR_VIRT_START_PAE \
- mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE)
-#define MACH2PHYS_VIRT_START_PAE \
- mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE)
-#define MACH2PHYS_VIRT_END_PAE \
- mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
+#define HYPERVISOR_VIRT_START_PAE xen_mk_ulong(__HYPERVISOR_VIRT_START_PAE)
+#define MACH2PHYS_VIRT_START_PAE xen_mk_ulong(__MACH2PHYS_VIRT_START_PAE)
+#define MACH2PHYS_VIRT_END_PAE xen_mk_ulong(__MACH2PHYS_VIRT_END_PAE)
/* Non-PAE bounds are obsolete. */
#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
#define HYPERVISOR_VIRT_START_NONPAE \
- mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE)
+ xen_mk_ulong(__HYPERVISOR_VIRT_START_NONPAE)
#define MACH2PHYS_VIRT_START_NONPAE \
- mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE)
+ xen_mk_ulong(__MACH2PHYS_VIRT_START_NONPAE)
#define MACH2PHYS_VIRT_END_NONPAE \
- mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
+ xen_mk_ulong(__MACH2PHYS_VIRT_END_NONPAE)
#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
#ifndef HYPERVISOR_VIRT_START
-#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
#endif
-#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
-#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END)
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((ULONG_PTR *)MACH2PHYS_VIRT_START)
do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
(hnd).p = val; \
} while ( 0 )
+#define int64_aligned_t int64_t __attribute__((aligned(8)))
#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
#ifndef __ASSEMBLY__
+#if defined(XEN_GENERATING_COMPAT_HEADERS)
+/* nothing */
+#elif defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax). */
+#define __DECL_REG_LO8(which) union { \
+ uint32_t e ## which ## x; \
+ uint16_t which ## x; \
+ struct { \
+ uint8_t which ## l; \
+ uint8_t which ## h; \
+ }; \
+}
+#define __DECL_REG_LO16(name) union { \
+ uint32_t e ## name, _e ## name; \
+ uint16_t name; \
+}
+#else
+/* Other sources must always use the proper 32-bit name (e.g., eax). */
+#define __DECL_REG_LO8(which) uint32_t e ## which ## x
+#define __DECL_REG_LO16(name) uint32_t e ## name
+#endif
+
struct cpu_user_regs {
- uint32_t ebx;
- uint32_t ecx;
- uint32_t edx;
- uint32_t esi;
- uint32_t edi;
- uint32_t ebp;
- uint32_t eax;
+ __DECL_REG_LO8(b);
+ __DECL_REG_LO8(c);
+ __DECL_REG_LO8(d);
+ __DECL_REG_LO16(si);
+ __DECL_REG_LO16(di);
+ __DECL_REG_LO16(bp);
+ __DECL_REG_LO8(a);
uint16_t error_code; /* private */
uint16_t entry_vector; /* private */
- uint32_t eip;
+ __DECL_REG_LO16(ip);
uint16_t cs;
uint8_t saved_upcall_mask;
uint8_t _pad0;
- uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
- uint32_t esp;
+ __DECL_REG_LO16(flags); /* eflags.IF == !saved_upcall_mask */
+ __DECL_REG_LO16(sp);
uint16_t ss, _pad1;
uint16_t es, _pad2;
uint16_t ds, _pad3;
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+
/*
* Page-directory addresses above 4GB do not fit into architectural %cr3.
* When accessing %cr3, or equivalent field in vcpu_guest_context, guests
#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
#ifndef HYPERVISOR_VIRT_START
-#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
+#define HYPERVISOR_VIRT_START xen_mk_ulong(__HYPERVISOR_VIRT_START)
+#define HYPERVISOR_VIRT_END xen_mk_ulong(__HYPERVISOR_VIRT_END)
#endif
-#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
-#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_VIRT_START xen_mk_ulong(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END xen_mk_ulong(__MACH2PHYS_VIRT_END)
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
#ifndef machine_to_phys_mapping
#define machine_to_phys_mapping ((ULONG_PTR *)HYPERVISOR_VIRT_START)
/* Bottom of iret stack frame. */
};
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax/rax). */
+#define __DECL_REG_LOHI(which) union { \
+ uint64_t r ## which ## x; \
+ uint32_t e ## which ## x; \
+ uint16_t which ## x; \
+ struct { \
+ uint8_t which ## l; \
+ uint8_t which ## h; \
+ }; \
+}
+#define __DECL_REG_LO8(name) union { \
+ uint64_t r ## name; \
+ uint32_t e ## name; \
+ uint16_t name; \
+ uint8_t name ## l; \
+}
+#define __DECL_REG_LO16(name) union { \
+ uint64_t r ## name; \
+ uint32_t e ## name; \
+ uint16_t name; \
+}
+#define __DECL_REG_HI(num) union { \
+ uint64_t r ## num; \
+ uint32_t r ## num ## d; \
+ uint16_t r ## num ## w; \
+ uint8_t r ## num ## b; \
+}
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
#define __DECL_REG(name) union { \
uint64_t r ## name, e ## name; \
#define __DECL_REG(name) uint64_t r ## name
#endif
+#ifndef __DECL_REG_LOHI
+#define __DECL_REG_LOHI(name) __DECL_REG(name ## x)
+#define __DECL_REG_LO8 __DECL_REG
+#define __DECL_REG_LO16 __DECL_REG
+#define __DECL_REG_HI(num) uint64_t r ## num
+#endif
+
struct cpu_user_regs {
- uint64_t r15;
- uint64_t r14;
- uint64_t r13;
- uint64_t r12;
- __DECL_REG(bp);
- __DECL_REG(bx);
- uint64_t r11;
- uint64_t r10;
- uint64_t r9;
- uint64_t r8;
- __DECL_REG(ax);
- __DECL_REG(cx);
- __DECL_REG(dx);
- __DECL_REG(si);
- __DECL_REG(di);
+ __DECL_REG_HI(15);
+ __DECL_REG_HI(14);
+ __DECL_REG_HI(13);
+ __DECL_REG_HI(12);
+ __DECL_REG_LO8(bp);
+ __DECL_REG_LOHI(b);
+ __DECL_REG_HI(11);
+ __DECL_REG_HI(10);
+ __DECL_REG_HI(9);
+ __DECL_REG_HI(8);
+ __DECL_REG_LOHI(a);
+ __DECL_REG_LOHI(c);
+ __DECL_REG_LOHI(d);
+ __DECL_REG_LO8(si);
+ __DECL_REG_LO8(di);
uint32_t error_code; /* private */
uint32_t entry_vector; /* private */
- __DECL_REG(ip);
+ __DECL_REG_LO16(ip);
uint16_t cs, _pad0[1];
uint8_t saved_upcall_mask;
uint8_t _pad1[3];
- __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
- __DECL_REG(sp);
+ __DECL_REG_LO16(flags); /* rflags.IF == !saved_upcall_mask */
+ __DECL_REG_LO8(sp);
uint16_t ss, _pad2[3];
uint16_t es, _pad3[3];
uint16_t ds, _pad4[3];
- uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
- uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
+ uint16_t fs, _pad5[3]; /* Non-nul => takes precedence over fs_base. */
+ uint16_t gs, _pad6[3]; /* Non-nul => takes precedence over gs_base_user. */
};
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
#undef __DECL_REG
+#undef __DECL_REG_LOHI
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+#undef __DECL_REG_HI
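/*
 * Illustrative sketch (not part of the headers): assuming a build with
 * __XEN__ or __XEN_TOOLS__ defined, the anonymous unions above let the same
 * register be addressed at 64-, 32-, 16- and 8-bit widths.
 */
static inline void example_clear_ax(struct cpu_user_regs *regs)
{
    regs->rax = 0;      /* whole 64-bit register */
    regs->eax = 0;      /* low 32 bits */
    regs->ax  = 0;      /* low 16 bits */
    regs->al  = 0;      /* lowest byte */
}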
#define xen_pfn_to_cr3(pfn) ((ULONG_PTR)(pfn) << 12)
#define xen_cr3_to_pfn(cr3) ((ULONG_PTR)(cr3) >> 12)
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
#define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name)
#define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0)
-#ifdef __XEN_TOOLS__
-#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
-#endif
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#if defined(__i386__)
+# ifdef __XEN__
+__DeFiNe__ __DECL_REG_LO8(which) uint32_t e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) union { uint32_t e ## name; }
+# endif
#include "xen-x86_32.h"
+# ifdef __XEN__
+__UnDeF__ __DECL_REG_LO8
+__UnDeF__ __DECL_REG_LO16
+__DeFiNe__ __DECL_REG_LO8(which) e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) e ## name
+# endif
#elif defined(__x86_64__)
#include "xen-x86_64.h"
#endif
#ifndef __ASSEMBLY__
typedef ULONG_PTR xen_pfn_t;
#define PRI_xen_pfn "lx"
+#define PRIu_xen_pfn "lu"
#endif
#define XEN_HAVE_PV_GUEST_ENTRY 1
typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
/*
- * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
* in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ *
+ * Also note that when calling DOMCTL_setvcpucontext for HVM guests, not all
+ * information in this structure is updated; the fields read include: fpu_ctxt
+ * (if VGCT_I387_VALID is set), flags, user_regs and debugreg[*].
+ *
+ * Note: VCPUOP_initialise for HVM guests is non-symmetric with
+ * DOMCTL_setvcpucontext, and uses struct vcpu_hvm_context from hvm/hvm_vcpu.h
*/
struct vcpu_guest_context {
/* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
struct arch_shared_info {
- ULONG_PTR max_pfn; /* max pfn that appears in table */
- /* Frame containing list of mfns containing list of mfns containing p2m. */
+ /*
+ * Number of valid entries in the p2m table(s) anchored at
+ * pfn_to_mfn_frame_list_list and/or p2m_vaddr.
+ */
+ ULONG_PTR max_pfn;
+ /*
+ * Frame containing list of mfns containing list of mfns containing p2m.
+ * A value of 0 indicates it has not yet been set up, ~0 indicates it has
+ * been set to invalid e.g. due to the p2m being too large for the 3-level
+ * p2m tree. In this case the linear mapper p2m list anchored at p2m_vaddr
+ * is to be used.
+ */
xen_pfn_t pfn_to_mfn_frame_list_list;
ULONG_PTR nmi_reason;
- uint64_t pad[32];
+ /*
+ * Following three fields are valid if p2m_cr3 contains a value different
+ * from 0.
+ * p2m_cr3 is the root of the address space where p2m_vaddr is valid.
+ * p2m_cr3 is in the same format as a cr3 value in the vcpu register state
+ * and holds the folded machine frame number (via xen_pfn_to_cr3) of a
+ * L3 or L4 page table.
+ * p2m_vaddr holds the virtual address of the linear p2m list. All entries
+ * in the range [0...max_pfn[ are accessible via this pointer.
+ * p2m_generation will be incremented by the guest before and after each
+ * change of the mappings of the p2m list. p2m_generation starts at 0 and
+ * a value with the least significant bit set indicates that a mapping
+ * update is in progress. This allows guest external software (e.g. in Dom0)
+ * to verify that read mappings are consistent and whether they have changed
+ * since the last check.
+ * Modifying a p2m element in the linear p2m list is allowed via an atomic
+ * write only.
+ */
+ ULONG_PTR p2m_cr3; /* cr3 value of the p2m address space */
+ ULONG_PTR p2m_vaddr; /* virtual address of the p2m list */
+ ULONG_PTR p2m_generation; /* generation count of p2m mapping */
+#ifdef __i386__
+ /* There's no room for this field in the generic structure. */
+ uint32_t wc_sec_hi;
+#endif
};
typedef struct arch_shared_info arch_shared_info_t;
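/*
 * Illustrative sketch (not part of the headers): one way external software
 * could use p2m_generation to obtain a consistent read of the linear p2m
 * list, assuming 'arch' and 'p2m' are mappings of arch_shared_info and of
 * the list anchored at p2m_vaddr respectively.
 */
static inline int example_read_p2m(const volatile struct arch_shared_info *arch,
                                   const volatile xen_pfn_t *p2m,
                                   ULONG_PTR pfn, xen_pfn_t *mfn)
{
    ULONG_PTR gen = arch->p2m_generation;

    if ( gen & 1 )                        /* mapping update in progress */
        return 0;
    *mfn = p2m[pfn];
    return gen == arch->p2m_generation;   /* 0 => caller should retry */
}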
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/*
+ * struct xen_arch_domainconfig's ABI is covered by
+ * XEN_DOMCTL_INTERFACE_VERSION.
+ */
+struct xen_arch_domainconfig {
+#define _XEN_X86_EMU_LAPIC 0
+#define XEN_X86_EMU_LAPIC (1U<<_XEN_X86_EMU_LAPIC)
+#define _XEN_X86_EMU_HPET 1
+#define XEN_X86_EMU_HPET (1U<<_XEN_X86_EMU_HPET)
+#define _XEN_X86_EMU_PM 2
+#define XEN_X86_EMU_PM (1U<<_XEN_X86_EMU_PM)
+#define _XEN_X86_EMU_RTC 3
+#define XEN_X86_EMU_RTC (1U<<_XEN_X86_EMU_RTC)
+#define _XEN_X86_EMU_IOAPIC 4
+#define XEN_X86_EMU_IOAPIC (1U<<_XEN_X86_EMU_IOAPIC)
+#define _XEN_X86_EMU_PIC 5
+#define XEN_X86_EMU_PIC (1U<<_XEN_X86_EMU_PIC)
+#define _XEN_X86_EMU_VGA 6
+#define XEN_X86_EMU_VGA (1U<<_XEN_X86_EMU_VGA)
+#define _XEN_X86_EMU_IOMMU 7
+#define XEN_X86_EMU_IOMMU (1U<<_XEN_X86_EMU_IOMMU)
+#define _XEN_X86_EMU_PIT 8
+#define XEN_X86_EMU_PIT (1U<<_XEN_X86_EMU_PIT)
+#define _XEN_X86_EMU_USE_PIRQ 9
+#define XEN_X86_EMU_USE_PIRQ (1U<<_XEN_X86_EMU_USE_PIRQ)
+#define _XEN_X86_EMU_VPCI 10
+#define XEN_X86_EMU_VPCI (1U<<_XEN_X86_EMU_VPCI)
+
+#define XEN_X86_EMU_ALL (XEN_X86_EMU_LAPIC | XEN_X86_EMU_HPET | \
+ XEN_X86_EMU_PM | XEN_X86_EMU_RTC | \
+ XEN_X86_EMU_IOAPIC | XEN_X86_EMU_PIC | \
+ XEN_X86_EMU_VGA | XEN_X86_EMU_IOMMU | \
+ XEN_X86_EMU_PIT | XEN_X86_EMU_USE_PIRQ |\
+ XEN_X86_EMU_VPCI)
+ uint32_t emulation_flags;
+};
+
+/* Location of online VCPU bitmap. */
+#define XEN_ACPI_CPU_MAP 0xaf00
+#define XEN_ACPI_CPU_MAP_LEN ((HVM_MAX_VCPUS + 7) / 8)
+
+/* GPE0 bit set during CPU hotplug */
+#define XEN_ACPI_GPE0_CPUHP_BIT 2
+#endif
+
#endif /* !__ASSEMBLY__ */
/*
#define EVTCHNOP_bind_vcpu 8
#define EVTCHNOP_unmask 9
#define EVTCHNOP_reset 10
+#define EVTCHNOP_init_control 11
+#define EVTCHNOP_expand_array 12
+#define EVTCHNOP_set_priority 13
/* ` } */
typedef uint32_t evtchn_port_t;
* is allocated in <dom> and returned as <port>.
* NOTES:
* 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
- * 2. <rdom> may be DOMID_SELF, allowing loopback connections.
+ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
struct evtchn_alloc_unbound {
/* IN parameters */
* a port that is unbound and marked as accepting bindings from the calling
* domain. A fresh port is allocated in the calling domain and returned as
* <local_port>.
+ *
+ * In case the peer domain has already tried to set our event channel
+ * pending, before it was bound, EVTCHNOP_bind_interdomain always sets
+ * the local event channel pending.
+ *
+ * The usual pattern of use, in the guest's upcall (or subsequent
+ * handler) is as follows: (Re-enable the event channel for subsequent
+ * signalling and then) check for the existence of whatever condition
+ * is being waited for by other means, and take whatever action is
+ * needed (if any).
+ *
* NOTES:
* 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
*/
* NOTES:
* 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
+ * 3. Destroys all control blocks and event array, resets event channel
+ * operations to 2-level ABI if called with <dom> == DOMID_SELF and FIFO
+ * ABI was used. Guests should not bind events during EVTCHNOP_reset call
+ * as these events are likely to be lost.
*/
struct evtchn_reset {
/* IN parameters. */
};
typedef struct evtchn_reset evtchn_reset_t;
+/*
+ * EVTCHNOP_init_control: initialize the control block for the FIFO ABI.
+ *
+ * Note: any events that are currently pending will not be resent and
+ * will be lost. Guests should call this before binding any event to
+ * avoid losing any events.
+ */
+struct evtchn_init_control {
+ /* IN parameters. */
+ uint64_t control_gfn;
+ uint32_t offset;
+ uint32_t vcpu;
+ /* OUT parameters. */
+ uint8_t link_bits;
+ uint8_t _pad[7];
+};
+typedef struct evtchn_init_control evtchn_init_control_t;
+
+/*
+ * EVTCHNOP_expand_array: add an additional page to the event array.
+ */
+struct evtchn_expand_array {
+ /* IN parameters. */
+ uint64_t array_gfn;
+};
+typedef struct evtchn_expand_array evtchn_expand_array_t;
+
+/*
+ * EVTCHNOP_set_priority: set the priority for an event channel.
+ */
+struct evtchn_set_priority {
+ /* IN parameters. */
+ uint32_t port;
+ uint32_t priority;
+};
+typedef struct evtchn_set_priority evtchn_set_priority_t;
+
/*
* ` enum neg_errnoval
* ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op)
typedef struct evtchn_op evtchn_op_t;
DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
+/*
+ * 2-level ABI
+ */
+
+#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
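/*
 * For a 64-bit xen_ulong_t this evaluates to 8 * 8 * 64 = 4096 channels
 * (the 2-level ABI uses one word of selector bits, each covering a word of
 * pending bits); a 32-bit xen_ulong_t gives 4 * 4 * 64 = 1024.
 */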
+
+/*
+ * FIFO ABI
+ */
+
+/* Events may have priorities from 0 (highest) to 15 (lowest). */
+#define EVTCHN_FIFO_PRIORITY_MAX 0
+#define EVTCHN_FIFO_PRIORITY_DEFAULT 7
+#define EVTCHN_FIFO_PRIORITY_MIN 15
+
+#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1)
+
+typedef uint32_t event_word_t;
+
+#define EVTCHN_FIFO_PENDING 31
+#define EVTCHN_FIFO_MASKED 30
+#define EVTCHN_FIFO_LINKED 29
+#define EVTCHN_FIFO_BUSY 28
+
+#define EVTCHN_FIFO_LINK_BITS 17
+#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1)
+
+#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS)
+
+struct evtchn_fifo_control_block {
+ uint32_t ready;
+ uint32_t _rsvd;
+ uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
+};
+typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t;
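/*
 * Illustrative sketch (not part of the headers): each channel's state lives
 * in an event_word_t, with the flag bits defined above and the link field
 * in the low EVTCHN_FIFO_LINK_BITS bits.
 */
static inline int example_evtchn_fifo_pending(event_word_t w)
{
    return (w & (1u << EVTCHN_FIFO_PENDING)) != 0;
}

static inline uint32_t example_evtchn_fifo_link(event_word_t w)
{
    return w & EVTCHN_FIFO_LINK_MASK;
}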
+
#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
/*
* table are identified by grant references. A grant reference is an
* integer, which indexes into the grant table. It acts as a
* capability which the grantee can use to perform operations on the
- * granter’s memory.
+ * granter's memory.
*
* This capability-based system allows shared-memory communications
* between unprivileged domains. A grant reference also encapsulates
/* The domain being granted foreign privileges. [GST] */
domid_t domid;
/*
- * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
- * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
+ * GTF_permit_access: GFN that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST]
+ * GTF_transfer_completed: MFN whose ownership transferred by @domid
+ * (non-translated guests only). [XEN]
*/
uint32_t frame;
};
#define GNTTABOP_get_status_frames 9
#define GNTTABOP_get_version 10
#define GNTTABOP_swap_grant_ref 11
+#define GNTTABOP_cache_flush 12
#endif /* __XEN_INTERFACE_VERSION__ */
/* ` } */
/*
* GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
* by devices and/or host CPUs. If successful, <handle> is a tracking number
- * that must be presented later to destroy the mapping(s). On error, <handle>
+ * that must be presented later to destroy the mapping(s). On error, <status>
* is a negative status code.
* NOTES:
* 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
/*
- * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
- * foreign domain has previously registered its interest in the transfer via
- * <domid, ref>.
+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The foreign domain
+ * has previously registered its interest in the transfer via <domid, ref>.
*
 * Note that, even if the transfer fails, the specified page no longer belongs
* to the calling domain *unless* the error is GNTST_bad_page.
+ *
+ * Note further that only PV guests can use this operation.
*/
struct gnttab_transfer {
/* IN parameters. */
struct gnttab_copy {
/* IN parameters. */
- struct {
+ struct gnttab_copy_ptr {
union {
grant_ref_t ref;
xen_pfn_t gmfn;
#if __XEN_INTERFACE_VERSION__ >= 0x0003020a
/*
* GNTTABOP_set_version: Request a particular version of the grant
- * table shared table structure. This operation can only be performed
- * once in any given domain. It must be performed before any grants
- * are activated; otherwise, the domain will be stuck with version 1.
- * The only defined versions are 1 and 2.
+ * table shared table structure. This operation may be used to toggle
+ * between different versions, but must be performed while no grants
+ * are active. The only defined versions are 1 and 2.
*/
struct gnttab_set_version {
/* IN/OUT parameters */
typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t);
+/*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+struct gnttab_cache_flush {
+ union {
+ uint64_t dev_bus_addr;
+ grant_ref_t ref;
+ } a;
+ uint16_t offset; /* offset from start of grant */
+ uint16_t length; /* size within the grant */
+#define GNTTAB_CACHE_CLEAN (1u<<0)
+#define GNTTAB_CACHE_INVAL (1u<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1u<<31)
+ uint32_t op;
+};
+typedef struct gnttab_cache_flush gnttab_cache_flush_t;
+DEFINE_XEN_GUEST_HANDLE(gnttab_cache_flush_t);
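/*
 * Illustrative sketch (not part of the headers): cleaning part of a granted
 * page addressed by grant reference rather than by bus address.
 */
static inline void example_cache_clean_gref(struct gnttab_cache_flush *op,
                                            grant_ref_t ref,
                                            uint16_t offset, uint16_t length)
{
    op->a.ref  = ref;
    op->offset = offset;   /* offset from start of grant */
    op->length = length;   /* size within the grant */
    op->op     = GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_SOURCE_GREF;
}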
+
#endif /* __XEN_INTERFACE_VERSION__ */
/*
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#include "../xen.h"
#include "../trace.h"
+#include "../event_channel.h"
/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param 0
typedef struct xen_hvm_param xen_hvm_param_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
+#if __XEN_INTERFACE_VERSION__ < 0x00040900
+
/* Set the logical level of one of a domain's PCI INTx wires. */
#define HVMOP_set_pci_intx_level 2
struct xen_hvm_set_pci_intx_level {
typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs 5
+/*
+ * hvmmem_type_t should not be defined when generating the corresponding
+ * compat header. This will ensure that the improperly named HVMMEM_(*)
+ * values are defined only once.
+ */
+#ifndef XEN_GENERATING_COMPAT_HEADERS
+
typedef enum {
HVMMEM_ram_rw, /* Normal read/write guest RAM */
HVMMEM_ram_ro, /* Read-only; writes are discarded */
HVMMEM_mmio_dm, /* Reads and write go to the device model */
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+ HVMMEM_mmio_write_dm, /* Read-only; writes go to the device model */
+#else
+ HVMMEM_unused, /* Placeholder; setting memory to this type
+ will fail for code after 4.7.0 */
+#endif
+ HVMMEM_ioreq_server /* Memory type claimed by an ioreq server; type
+ changes to this value are only allowed after
+ an ioreq server has claimed its ownership.
+ Only pages with HVMMEM_ram_rw are allowed to
+ change to this type; conversely, pages with
+ this type are only allowed to be changed back
+ to HVMMEM_ram_rw. */
} hvmmem_type_t;
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-/* Track dirty VRAM. */
-#define HVMOP_track_dirty_vram 6
-struct xen_hvm_track_dirty_vram {
- /* Domain to be tracked. */
- domid_t domid;
- /* First pfn to track. */
- uint64_aligned_t first_pfn;
- /* Number of pages to track. */
- uint64_aligned_t nr;
- /* OUT variable. */
- /* Dirty bitmap buffer. */
- XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
-};
-typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
-
-/* Notify that some pages got modified by the Device Model. */
-#define HVMOP_modified_memory 7
-struct xen_hvm_modified_memory {
- /* Domain to be updated. */
- domid_t domid;
- /* First pfn. */
- uint64_aligned_t first_pfn;
- /* Number of pages. */
- uint64_aligned_t nr;
-};
-typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
-
-#define HVMOP_set_mem_type 8
-/* Notify that a region of memory is to be treated in a specific way. */
-struct xen_hvm_set_mem_type {
- /* Domain to be updated. */
- domid_t domid;
- /* Memory type */
- uint16_t hvmmem_type;
- /* Number of pages. */
- uint32_t nr;
- /* First pfn. */
- uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+#endif /* XEN_GENERATING_COMPAT_HEADERS */
/* Hint from PV drivers for pagetable destruction. */
#define HVMOP_pagetable_dying 9
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Deprecated by XENMEM_access_op_set_access */
#define HVMOP_set_mem_access 12
-typedef enum {
- HVMMEM_access_n,
- HVMMEM_access_r,
- HVMMEM_access_w,
- HVMMEM_access_rw,
- HVMMEM_access_x,
- HVMMEM_access_rx,
- HVMMEM_access_wx,
- HVMMEM_access_rwx,
- HVMMEM_access_rx2rw, /* Page starts off as r-x, but automatically
- * change to r-w on a write */
- HVMMEM_access_n2rwx, /* Log access: starts off as n, automatically
- * goes to rwx, generating an event without
- * pausing the vcpu */
- HVMMEM_access_default /* Take the domain default */
-} hvmmem_access_t;
-/* Notify that a region of memory is to have specific access types */
-struct xen_hvm_set_mem_access {
- /* Domain to be updated. */
- domid_t domid;
- /* Memory type */
- uint16_t hvmmem_access; /* hvm_access_t */
- /* Number of pages, ignored on setting default access */
- uint32_t nr;
- /* First pfn, or ~0ull to set the default access for new pages */
- uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
+/* Deprecated by XENMEM_access_op_get_access */
#define HVMOP_get_mem_access 13
-/* Get the specific access type for that region of memory */
-struct xen_hvm_get_mem_access {
- /* Domain to be queried. */
- domid_t domid;
- /* Memory type: OUT */
- uint16_t hvmmem_access; /* hvm_access_t */
- /* pfn, or ~0ull for default access for new pages. IN */
- uint64_aligned_t pfn;
-};
-typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
-
-#define HVMOP_inject_trap 14
-/* Inject a trap into a VCPU, which will get taken up on the next
- * scheduling of it. Note that the caller should know enough of the
- * state of the CPU before injecting, to know what the effect of
- * injecting the trap will be.
- */
-struct xen_hvm_inject_trap {
- /* Domain to be queried. */
- domid_t domid;
- /* VCPU */
- uint32_t vcpuid;
- /* Vector number */
- uint32_t vector;
- /* Trap type (HVMOP_TRAP_*) */
- uint32_t type;
-/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
-# define HVMOP_TRAP_ext_int 0 /* external interrupt */
-# define HVMOP_TRAP_nmi 2 /* nmi */
-# define HVMOP_TRAP_hw_exc 3 /* hardware exception */
-# define HVMOP_TRAP_sw_int 4 /* software interrupt (CD nn) */
-# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
-# define HVMOP_TRAP_sw_exc 6 /* INT3 (CC), INTO (CE) */
- /* Error code, or ~0u to skip */
- uint32_t error_code;
- /* Intruction length */
- uint32_t insn_len;
- /* CR2 for page faults */
- uint64_aligned_t cr2;
-};
-typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
-/* MSI injection for emulated devices */
-#define HVMOP_inject_msi 16
-struct xen_hvm_inject_msi {
- /* Domain to be injected */
- domid_t domid;
- /* Data -- lower 32 bits */
- uint32_t data;
- /* Address (0xfeexxxxx) */
- uint64_t addr;
-};
-typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
+/*
+ * Definitions relating to DMOP_create_ioreq_server. (Defined here for
+ * backwards compatibility).
+ */
+
+#define HVM_IOREQSRV_BUFIOREQ_OFF 0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
+/*
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
+ */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ * channel upcalls on the specified <vcpu>. If set,
+ * this vector will be used in preference to the
+ * domain global callback via (see
+ * HVM_PARAM_CALLBACK_IRQ).
+ */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+ uint32_t vcpu;
+ uint8_t vector;
+};
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#define HVMOP_guest_request_vm_event 24
+
+/* HVMOP_altp2m: perform altp2m state operations */
+#define HVMOP_altp2m 25
+
+#define HVMOP_ALTP2M_INTERFACE_VERSION 0x00000001
+
+struct xen_hvm_altp2m_domain_state {
+ /* IN or OUT variable on/off */
+ uint8_t state;
+};
+typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
+
+struct xen_hvm_altp2m_vcpu_enable_notify {
+ uint32_t vcpu_id;
+ uint32_t pad;
+ /* #VE info area gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
+
+struct xen_hvm_altp2m_view {
+ /* IN/OUT variable */
+ uint16_t view;
+ /* Create view only: default access type
+ * NOTE: currently ignored */
+ uint16_t hvmmem_default_access; /* xenmem_access_t */
+};
+typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
+
+struct xen_hvm_altp2m_set_mem_access {
+ /* view */
+ uint16_t view;
+ /* Memory type */
+ uint16_t hvmmem_access; /* xenmem_access_t */
+ uint32_t pad;
+ /* gfn */
+ uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+
+struct xen_hvm_altp2m_set_mem_access_multi {
+ /* view */
+ uint16_t view;
+ uint16_t pad;
+ /* Number of pages */
+ uint32_t nr;
+ /*
+ * Used for continuation purposes.
+ * Must be set to zero upon initial invocation.
+ */
+ uint64_t opaque;
+ /* List of pfns to set access for */
+ XEN_GUEST_HANDLE(const_uint64) pfn_list;
+ /* Corresponding list of access settings for pfn_list */
+ XEN_GUEST_HANDLE(const_uint8) access_list;
+};
+
+struct xen_hvm_altp2m_change_gfn {
+ /* view */
+ uint16_t view;
+ uint16_t pad1;
+ uint32_t pad2;
+ /* old gfn */
+ uint64_t old_gfn;
+ /* new gfn, INVALID_GFN (~0UL) means revert */
+ uint64_t new_gfn;
+};
+typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
+
+struct xen_hvm_altp2m_op {
+ uint32_t version; /* HVMOP_ALTP2M_INTERFACE_VERSION */
+ uint32_t cmd;
+/* Get/set the altp2m state for a domain */
+#define HVMOP_altp2m_get_domain_state 1
+#define HVMOP_altp2m_set_domain_state 2
+/* Set the current VCPU to receive altp2m event notifications */
+#define HVMOP_altp2m_vcpu_enable_notify 3
+/* Create a new view */
+#define HVMOP_altp2m_create_p2m 4
+/* Destroy a view */
+#define HVMOP_altp2m_destroy_p2m 5
+/* Switch view for an entire domain */
+#define HVMOP_altp2m_switch_p2m 6
+/* Notify that a page of memory is to have specific access types */
+#define HVMOP_altp2m_set_mem_access 7
+/* Change a p2m entry to have a different gfn->mfn mapping */
+#define HVMOP_altp2m_change_gfn 8
+/* Set access for an array of pages */
+#define HVMOP_altp2m_set_mem_access_multi 9
+ domid_t domain;
+ uint16_t pad1;
+ uint32_t pad2;
+ union {
+ struct xen_hvm_altp2m_domain_state domain_state;
+ struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
+ struct xen_hvm_altp2m_view view;
+ struct xen_hvm_altp2m_set_mem_access set_mem_access;
+ struct xen_hvm_altp2m_change_gfn change_gfn;
+ struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
+ uint8_t pad[64];
+ } u;
+};
+typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
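/*
 * Illustrative sketch (not part of the headers): preparing an HVMOP_altp2m
 * argument that queries whether altp2m is enabled for a domain.
 */
static inline void example_altp2m_get_state(struct xen_hvm_altp2m_op *op,
                                            domid_t dom)
{
    op->version = HVMOP_ALTP2M_INTERFACE_VERSION;
    op->cmd     = HVMOP_altp2m_get_domain_state;
    op->domain  = dom;
    op->pad1    = 0;
    op->pad2    = 0;
    op->u.domain_state.state = 0;   /* OUT on get */
}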
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007, Keir Fraser
*/
#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
* Parameter space for HVMOP_{set,get}_param.
*/
+#define HVM_PARAM_CALLBACK_IRQ 0
+#define HVM_PARAM_CALLBACK_IRQ_TYPE_MASK xen_mk_ullong(0xFF00000000000000)
/*
* How should CPU0 event-channel notifications be delivered?
- * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
- * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
- * Domain = val[47:32], Bus = val[31:16],
- * DevFn = val[15: 8], IntX = val[ 1: 0]
- * val[63:56] == 2: val[7:0] is a vector number, check for
- * XENFEAT_hvm_callback_vector to know if this delivery
- * method is available.
+ *
* If val == 0 then CPU0 event-channel notifications are not delivered.
+ * If val != 0, val[63:56] encodes the type, as follows:
*/
-#define HVM_PARAM_CALLBACK_IRQ 0
+
+#define HVM_PARAM_CALLBACK_TYPE_GSI 0
+/*
+ * val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
+ * and disables all notifications.
+ */
+
+#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
+/*
+ * val[55:0] is a delivery PCI INTx line:
+ * Domain = val[47:32], Bus = val[31:16] DevFn = val[15:8], IntX = val[1:0]
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
+/*
+ * val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
+ * if this delivery method is available.
+ */
+#elif defined(__arm__) || defined(__aarch64__)
+#define HVM_PARAM_CALLBACK_TYPE_PPI 2
+/*
+ * val[55:16] needs to be zero.
+ * val[15:8] is interrupt flag of the PPI used by event-channel:
+ * bit 8: the PPI is edge(1) or level(0) triggered
+ * bit 9: the PPI is active low(1) or high(0)
+ * val[7:0] is a PPI number used by event-channel.
+ * This is only used by ARM/ARM64 and masking/eoi the interrupt associated to
+ * the notification is handled by the interrupt controller.
+ */
+#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK 0xFF00
+#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL 2
+#endif
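/*
 * Illustrative sketch (not part of the headers): composing an
 * HVM_PARAM_CALLBACK_IRQ value for the x86 vector delivery method, assuming
 * XENFEAT_hvm_callback_vector has been confirmed first.
 */
#if defined(__i386__) || defined(__x86_64__)
static inline uint64_t example_callback_via_vector(uint8_t vector)
{
    return ((uint64_t)HVM_PARAM_CALLBACK_TYPE_VECTOR << 56) | vector;
}
#endif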
/*
* These are not used by Xen. They are here for convenience of HVM-guest
#if defined(__i386__) || defined(__x86_64__)
-/* Expose Viridian interfaces to this HVM guest? */
+/*
+ * Viridian enlightenments
+ *
+ * (See http://download.microsoft.com/download/A/B/4/AB43A34E-BDD0-4FA6-BDEF-79EEF16E880B/Hypervisor%20Top%20Level%20Functional%20Specification%20v4.0.docx)
+ *
+ * To expose viridian enlightenments to the guest set this parameter
+ * to the desired feature mask. The base feature set must be present
+ * in any valid feature mask.
+ */
#define HVM_PARAM_VIRIDIAN 9
+/* Base+Freq viridian feature sets:
+ *
+ * - Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL)
+ * - APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
+ * - Virtual Processor index MSR (HV_X64_MSR_VP_INDEX)
+ * - Timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
+ * HV_X64_MSR_APIC_FREQUENCY)
+ */
+#define _HVMPV_base_freq 0
+#define HVMPV_base_freq (1 << _HVMPV_base_freq)
+
+/* Feature set modifications */
+
+/* Disable timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
+ * HV_X64_MSR_APIC_FREQUENCY).
+ * This modification restores the viridian feature set to the
+ * original 'base' set exposed in releases prior to Xen 4.4.
+ */
+#define _HVMPV_no_freq 1
+#define HVMPV_no_freq (1 << _HVMPV_no_freq)
+
+/* Enable Partition Time Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */
+#define _HVMPV_time_ref_count 2
+#define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count)
+
+/* Enable Reference TSC Page (HV_X64_MSR_REFERENCE_TSC) */
+#define _HVMPV_reference_tsc 3
+#define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc)
+
+/* Use Hypercall for remote TLB flush */
+#define _HVMPV_hcall_remote_tlb_flush 4
+#define HVMPV_hcall_remote_tlb_flush (1 << _HVMPV_hcall_remote_tlb_flush)
+
+/* Use APIC assist */
+#define _HVMPV_apic_assist 5
+#define HVMPV_apic_assist (1 << _HVMPV_apic_assist)
+
+/* Enable crash MSRs */
+#define _HVMPV_crash_ctl 6
+#define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
+
+#define HVMPV_feature_mask \
+ (HVMPV_base_freq | \
+ HVMPV_no_freq | \
+ HVMPV_time_ref_count | \
+ HVMPV_reference_tsc | \
+ HVMPV_hcall_remote_tlb_flush | \
+ HVMPV_apic_assist | \
+ HVMPV_crash_ctl)
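/*
 * Illustrative example (not part of the headers): a value that could be
 * written to HVM_PARAM_VIRIDIAN to enable the base feature set plus the
 * partition reference counter; HVMPV_base_freq must be present in any
 * valid mask.
 */
#define EXAMPLE_VIRIDIAN_FEATURES (HVMPV_base_freq | HVMPV_time_ref_count)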
+
#endif
/*
*/
#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
-/* Enable blocking memory events, async or sync (pause vcpu until response)
- * onchangeonly indicates messages only on a change of value */
+/* Deprecated */
#define HVM_PARAM_MEMORY_EVENT_CR0 20
#define HVM_PARAM_MEMORY_EVENT_CR3 21
#define HVM_PARAM_MEMORY_EVENT_CR4 22
#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
#define HVM_PARAM_MEMORY_EVENT_MSR 30
-#define HVMPME_MODE_MASK (3 << 0)
-#define HVMPME_mode_disabled 0
-#define HVMPME_mode_async 1
-#define HVMPME_mode_sync 2
-#define HVMPME_onchangeonly (1 << 2)
-
/* Boolean: Enable nestedhvm (hvm only) */
#define HVM_PARAM_NESTEDHVM 24
/* Params for the mem event rings */
#define HVM_PARAM_PAGING_RING_PFN 27
-#define HVM_PARAM_ACCESS_RING_PFN 28
+#define HVM_PARAM_MONITOR_RING_PFN 28
#define HVM_PARAM_SHARING_RING_PFN 29
/* SHUTDOWN_* action in case of a triple fault */
#define HVM_PARAM_TRIPLE_FAULT_REASON 31
-#define HVM_NR_PARAMS 32
+#define HVM_PARAM_IOREQ_SERVER_PFN 32
+#define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33
+
+/* Location of the VM Generation ID in guest physical address space. */
+#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
+
+/*
+ * Set mode for altp2m:
+ * disabled: don't activate altp2m (default)
+ * mixed: allow access to all altp2m ops for both in-guest and external tools
+ * external: allow access to external privileged tools only
+ * limited: guest only has limited access (ie. control VMFUNC and #VE)
+ */
+#define HVM_PARAM_ALTP2M 35
+#define XEN_ALTP2M_disabled 0
+#define XEN_ALTP2M_mixed 1
+#define XEN_ALTP2M_external 2
+#define XEN_ALTP2M_limited 3
+
+/*
+ * Size of the x87 FPU FIP/FDP registers that the hypervisor needs to
+ * save/restore. This is a workaround for a hardware limitation that
+ * does not allow the full FIP/FDP and FCS/FDS to be restored.
+ *
+ * Valid values are:
+ *
+ * 8: save/restore 64-bit FIP/FDP and clear FCS/FDS (default if CPU
+ * has FPCSDS feature).
+ *
+ * 4: save/restore 32-bit FIP/FDP, FCS/FDS, and clear upper 32-bits of
+ * FIP/FDP.
+ *
+ * 0: allow hypervisor to choose based on the value of FIP/FDP
+ * (default if CPU does not have FPCSDS).
+ *
+ * If FPCSDS (bit 13 in CPUID leaf 0x7, subleaf 0x0) is set, the CPU
+ * never saves FCS/FDS and this parameter should be left at the
+ * default of 8.
+ */
+#define HVM_PARAM_X87_FIP_WIDTH 36
+
+/*
+ * TSS (and its size) used on Intel when CR0.PE=0. The address occupies
+ * the low 32 bits, while the size is in the high 32 ones.
+ */
+#define HVM_PARAM_VM86_TSS_SIZED 37
+
+/* Enable MCA capabilities. */
+#define HVM_PARAM_MCA_CAP 38
+#define XEN_HVM_MCA_CAP_LMCE (xen_mk_ullong(1) << 0)
+#define XEN_HVM_MCA_CAP_MASK XEN_HVM_MCA_CAP_LMCE
+
+#define HVM_NR_PARAMS 39
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
* Values: string
*
* A free formatted string providing sufficient information for the
- * backend driver to open the backing device. (e.g. the path to the
- * file or block device representing the backing store.)
+ * hotplug script to attach the device and provide a suitable
+ * handler (ie: a block device) for blkback to use.
+ *
+ * physical-device
+ * Values: "MAJOR:MINOR"
+ * Notes: 11
+ *
+ * MAJOR and MINOR are the major number and minor number of the
+ * backing device respectively.
+ *
+ * physical-device-path
+ * Values: path string
+ *
+ * A string that contains the absolute path to the disk image. On
+ * NetBSD and Linux this is always a block device, while on FreeBSD
+ * it can be either a block device or a regular file.
*
* type
* Values: "file", "phy", "tap"
*
* The type of the backing device/object.
*
+ *
+ * direct-io-safe
+ * Values: 0/1 (boolean)
+ * Default Value: 0
+ *
+ * The underlying storage is not affected by the direct IO memory
+ * lifetime bug. See:
+ * http://lists.xen.org/archives/html/xen-devel/2012-12/msg01154.html
+ *
+ * Therefore this option gives the backend permission to use
+ * O_DIRECT, notwithstanding that bug.
+ *
+ * That is, if this option is enabled, use of O_DIRECT is safe,
+ * in circumstances where we would normally have avoided it as a
+ * workaround for that bug. This option is not relevant for all
+ * backends, and even not necessarily supported for those for
+ * which it is relevant. A backend which knows that it is not
+ * affected by the bug can ignore this option.
+ *
+ * This option doesn't require a backend to use O_DIRECT, so it
+ * should not be used to try to control the caching behaviour.
+ *
*--------------------------------- Features ---------------------------------
*
* feature-barrier
*
*------------------------- Backend Device Properties -------------------------
*
+ * discard-enable
+ * Values: 0/1 (boolean)
+ * Default Value: 1
+ *
+ * This optional property, set by the toolstack, instructs the backend
+ * to offer (or not to offer) discard to the frontend. If the property
+ * is missing the backend should offer discard if the backing storage
+ * actually supports it.
+ *
* discard-alignment
* Values: <uint32_t>
* Default Value: 0
* than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
*(10) The discard-secure property may be present and will be set to 1 if the
* backing device supports secure discard.
+ *(11) Only used by Linux and NetBSD.
+ */
+
+/*
+ * Multiple hardware queues/rings:
+ * If supported, the backend will write the key "multi-queue-max-queues" to
+ * the directory for that vbd, and set its value to the maximum supported
+ * number of queues.
+ * Frontends that are aware of this feature and wish to use it can write the
+ * key "multi-queue-num-queues" with the number they wish to use, which must be
+ * greater than zero, and no more than the value reported by the backend in
+ * "multi-queue-max-queues".
+ *
+ * For frontends requesting just one queue, the usual event-channel and
+ * ring-ref keys are written as before, simplifying the backend processing
+ * to avoid distinguishing between a frontend that doesn't understand the
+ * multi-queue feature, and one that does, but requested only one queue.
+ *
+ * Frontends requesting two or more queues must not write the toplevel
+ * event-channel and ring-ref keys, instead writing those keys under sub-keys
+ * having the name "queue-N" where N is the integer ID of the queue/ring for
+ * which those keys belong. Queues are indexed from zero.
+ * For example, a frontend with two queues must write the following set of
+ * queue-related keys:
+ *
+ * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
+ * /local/domain/1/device/vbd/0/queue-0 = ""
+ * /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
+ * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
+ * /local/domain/1/device/vbd/0/queue-1 = ""
+ * /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
+ * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
+ *
+ * It is also possible to use multiple queues/rings together with
+ * feature multi-page ring buffer.
+ * For example, a frontend requesting two queues/rings, with each ring
+ * buffer being two pages in size, must write the following set of
+ * related keys:
+ *
+ * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
+ * /local/domain/1/device/vbd/0/ring-page-order = "1"
+ * /local/domain/1/device/vbd/0/queue-0 = ""
+ * /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
+ * /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
+ * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
+ * /local/domain/1/device/vbd/0/queue-1 = ""
+ * /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
+ * /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
+ * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
+ *
*/
/*
*/
#define BLKIF_OP_RESERVED_1 4
/*
- * Indicate to the backend device that a region of storage is no longer in
+ * Indicate to the backend device that a region of storage is no longer in
* use, and may be discarded at any time without impact to the client. If
* the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the
* discarded region on the device must be rendered unrecoverable before the
* present, the frontend might use blkif_request_indirect structs in order to
* issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
* maximum number of indirect segments is fixed by the backend, but the
- * frontend can issue requests with any number of indirect segments as long as
+ * frontend can issue requests with any number of indirect segments as long as
* it's less than the number provided by the backend. The indirect_grefs field
* in blkif_request_indirect should be filled by the frontend with the
* grant references of the pages that are holding the indirect segments.
enum xsd_sockmsg_type
{
- XS_DEBUG,
+ XS_CONTROL,
+#define XS_DEBUG XS_CONTROL
XS_DIRECTORY,
XS_READ,
XS_GET_PERMS,
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
- XS_RESTRICT,
- XS_RESET_WATCHES
+ /* XS_RESTRICT has been removed */
+ XS_RESET_WATCHES = XS_SET_TARGET + 2,
+ XS_DIRECTORY_PART,
+
+ XS_TYPE_COUNT, /* Number of valid types. */
+
+ XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */
};
#define XS_WRITE_NONE "NONE"
XSD_ERROR(EROFS),
XSD_ERROR(EBUSY),
XSD_ERROR(EAGAIN),
- XSD_ERROR(EISCONN)
+ XSD_ERROR(EISCONN),
+ XSD_ERROR(E2BIG)
};
#endif
char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
XENSTORE_RING_IDX req_cons, req_prod;
XENSTORE_RING_IDX rsp_cons, rsp_prod;
+ uint32_t server_features; /* Bitmap of features supported by the server */
+ uint32_t connection;
};
/* Violating this is very bad. See docs/misc/xenstore.txt. */
#define XENSTORE_ABS_PATH_MAX 3072
#define XENSTORE_REL_PATH_MAX 2048
+/* The ability to reconnect a ring */
+#define XENSTORE_SERVER_FEATURE_RECONNECTION 1
+
+/* Valid values for the connection field */
+#define XENSTORE_CONNECTED 0 /* the steady-state */
+#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */
+
#endif /* _XS_WIRE_H */
/*
#define __XEN_PUBLIC_MEMORY_H__
#include "xen.h"
+#include "physdev.h"
/*
* Increase or decrease the specified domain's memory reservation. Returns the
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request (1<<17)
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
+/* Flag to indicate the node specified is virtual node */
+#define XENMEMF_vnode (1<<18)
#endif
struct xen_memory_reservation {
* Returns zero on complete success, otherwise a negative error code.
* On complete success then always @nr_exchanged == @in.nr_extents.
* On partial success @nr_exchanged indicates how much work was done.
+ *
+ * Note that only PV guests can use this operation.
*/
#define XENMEM_exchange 11
struct xen_memory_exchange {
typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
+/*
+ * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
+ *
+ * For a non compat caller, this functions similarly to
+ * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
+ * m2p table.
+ */
+#define XENMEM_machphys_compat_mfn_list 25
+
/*
* Returns the location in virtual address space of the machine_to_phys
* mapping table. Architectures which do not have a m2p table, or which do not
#define XENMAPSPACE_gmfn 2 /* GMFN */
#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
- * XENMEM_add_to_physmap_range only.
- */
+ * XENMEM_add_to_physmap_batch only. */
+#define XENMAPSPACE_dev_mmio 5 /* device mmio region
+ ARM only; the region is mapped in
+ Stage-2 using the Normal Memory
+ Inner/Outer Write-Back Cacheable
+ memory attribute. */
/* ` } */
/*
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
/* A batched version of add_to_physmap. */
-#define XENMEM_add_to_physmap_range 23
-struct xen_add_to_physmap_range {
+#define XENMEM_add_to_physmap_batch 23
+struct xen_add_to_physmap_batch {
/* IN */
/* Which domain to change the mapping for. */
domid_t domid;
/* Number of pages to go through */
uint16_t size;
- domid_t foreign_domid; /* IFF gmfn_foreign */
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+ domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
+#else
+ union xen_add_to_physmap_batch_extra {
+ domid_t foreign_domid; /* gmfn_foreign */
+ uint16_t res0; /* All the other spaces. Should be 0 */
+ } u;
+#endif
/* Indexes into space being mapped. */
XEN_GUEST_HANDLE(xen_ulong_t) idxs;
/* Per index error code. */
XEN_GUEST_HANDLE(int) errs;
};
-typedef struct xen_add_to_physmap_range xen_add_to_physmap_range_t;
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
+#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
+#define xen_add_to_physmap_range xen_add_to_physmap_batch
+typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+#endif
/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
/*
* Returns the real physical memory map. Passes the same structure as
* XENMEM_memory_map.
+ * Specifying buffer as NULL will return the number of entries required
+ * to store the complete memory map.
* arg == addr of xen_memory_map_t.
*/
#define XENMEM_machine_memory_map 10
#define XENMEM_paging_op_evict 1
#define XENMEM_paging_op_prep 2
-#define XENMEM_access_op 21
-#define XENMEM_access_op_resume 0
-
-struct xen_mem_event_op {
- uint8_t op; /* XENMEM_*_op_* */
+struct xen_mem_paging_op {
+ uint8_t op; /* XENMEM_paging_op_* */
domid_t domain;
-
/* PAGING_PREP IN: buffer to immediately fill page in */
uint64_aligned_t buffer;
/* Other OPs */
uint64_aligned_t gfn; /* IN: gfn of page being operated on */
};
-typedef struct xen_mem_event_op xen_mem_event_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
+typedef struct xen_mem_paging_op xen_mem_paging_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
+
+#define XENMEM_access_op 21
+#define XENMEM_access_op_set_access 0
+#define XENMEM_access_op_get_access 1
+/*
+ * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
+ * currently unused, but since they have been in use please do not reuse them.
+ *
+ * #define XENMEM_access_op_enable_emulate 2
+ * #define XENMEM_access_op_disable_emulate 3
+ */
+#define XENMEM_access_op_set_access_multi 4
+
+typedef enum {
+ XENMEM_access_n,
+ XENMEM_access_r,
+ XENMEM_access_w,
+ XENMEM_access_rw,
+ XENMEM_access_x,
+ XENMEM_access_rx,
+ XENMEM_access_wx,
+ XENMEM_access_rwx,
+ /*
+ * Page starts off as r-x, but automatically
+ * change to r-w on a write
+ */
+ XENMEM_access_rx2rw,
+ /*
+ * Log access: starts off as n, automatically
+ * goes to rwx, generating an event without
+ * pausing the vcpu
+ */
+ XENMEM_access_n2rwx,
+ /* Take the domain default */
+ XENMEM_access_default
+} xenmem_access_t;
+
+struct xen_mem_access_op {
+ /* XENMEM_access_op_* */
+ uint8_t op;
+ /* xenmem_access_t */
+ uint8_t access;
+ domid_t domid;
+ /*
+ * Number of pages for set op (or size of pfn_list for
+ * XENMEM_access_op_set_access_multi)
+ * Ignored on setting default access and other ops
+ */
+ uint32_t nr;
+ /*
+ * First pfn for set op
+ * pfn for get op
+ * ~0ull is used to set and get the default access for pages
+ */
+ uint64_aligned_t pfn;
+ /*
+ * List of pfns to set access for
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(const_uint64) pfn_list;
+ /*
+ * Corresponding list of access settings for pfn_list
+ * Used only with XENMEM_access_op_set_access_multi
+ */
+ XEN_GUEST_HANDLE(const_uint8) access_list;
+};
+typedef struct xen_mem_access_op xen_mem_access_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
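/*
 * Illustrative sketch (not part of the headers): requesting read-only
 * access on a single gfn; pfn_list/access_list are only used by
 * XENMEM_access_op_set_access_multi and are left untouched here.
 */
static inline void example_set_access_r(struct xen_mem_access_op *op,
                                        domid_t domid, uint64_t gfn)
{
    op->op     = XENMEM_access_op_set_access;
    op->access = XENMEM_access_r;
    op->domid  = domid;
    op->nr     = 1;       /* single page */
    op->pfn    = gfn;     /* ~0ull would set the domain default instead */
}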
#define XENMEM_sharing_op 22
#define XENMEM_sharing_op_nominate_gfn 0
#define XENMEM_sharing_op_nominate_gref 1
#define XENMEM_sharing_op_share 2
-#define XENMEM_sharing_op_resume 3
-#define XENMEM_sharing_op_debug_gfn 4
-#define XENMEM_sharing_op_debug_mfn 5
-#define XENMEM_sharing_op_debug_gref 6
-#define XENMEM_sharing_op_add_physmap 7
-#define XENMEM_sharing_op_audit 8
+#define XENMEM_sharing_op_debug_gfn 3
+#define XENMEM_sharing_op_debug_mfn 4
+#define XENMEM_sharing_op_debug_gref 5
+#define XENMEM_sharing_op_add_physmap 6
+#define XENMEM_sharing_op_audit 7
+#define XENMEM_sharing_op_range_share 8
#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
* for sharing utilities sitting as "filters" in IO backends
* (e.g. memshr + blktap(2)). The IO backend is only exposed
* to grant references, and this allows sharing of the grefs */
-#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (1ULL << 62)
+#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62)
#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
(field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
uint64_aligned_t client_gfn; /* IN: the client gfn */
uint64_aligned_t client_handle; /* IN: handle to the client page */
domid_t client_domain; /* IN: the client domain id */
- } share;
+ } share;
+ struct mem_sharing_op_range { /* OP_RANGE_SHARE */
+ uint64_aligned_t first_gfn; /* IN: the first gfn */
+ uint64_aligned_t last_gfn; /* IN: the last gfn */
+ uint64_aligned_t opaque; /* Must be set to 0 */
+ domid_t client_domain; /* IN: the client domain id */
+ uint16_t _pad[3]; /* Must be set to 0 */
+ } range;
struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
union {
uint64_aligned_t gfn; /* IN: gfn to debug */
/*
 * XENMEM_claim_pages flags - there are no flags at this time.
- * The zero value is appropiate.
+ * The zero value is appropriate.
+ */
+
+/*
+ * With some legacy devices, certain guest-physical addresses cannot safely
+ * be used for other purposes, e.g. to map guest RAM. This hypercall
+ * enumerates those regions so the toolstack can avoid using them.
+ */
+#define XENMEM_reserved_device_memory_map 27
+struct xen_reserved_device_memory {
+ xen_pfn_t start_pfn;
+ xen_ulong_t nr_pages;
+};
+typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);
+
+struct xen_reserved_device_memory_map {
+#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
+ /* IN */
+ uint32_t flags;
+ /*
+ * IN/OUT
+ *
+ * Gets set to the required number of entries when too low,
+ * signaled by error code -ERANGE.
+ */
+ unsigned int nr_entries;
+ /* OUT */
+ XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
+ /* IN */
+ union {
+ struct physdev_pci_device pci;
+ } dev;
+};
+typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);
+
+/*
+ * Get the pages for a particular guest resource, so that they can be
+ * mapped directly by a tools domain.
*/
+#define XENMEM_acquire_resource 28
+struct xen_mem_acquire_resource {
+ /* IN - The domain whose resource is to be mapped */
+ domid_t domid;
+ /* IN - the type of resource */
+ uint16_t type;
+
+#define XENMEM_resource_ioreq_server 0
+
+ /*
+ * IN - a type-specific resource identifier, which must be zero
+ * unless stated otherwise.
+ *
+ * type == XENMEM_resource_ioreq_server -> id == ioreq server id
+ */
+ uint32_t id;
+ /*
+ * IN/OUT - As an IN parameter number of frames of the resource
+ * to be mapped. However, if the specified value is 0 and
+ * frame_list is NULL then this field will be set to the
+ * maximum value supported by the implementation on return.
+ */
+ uint32_t nr_frames;
+ /*
+ * OUT - Must be zero on entry. On return this may contain a bitwise
+ * OR of the following values.
+ */
+ uint32_t flags;
+
+ /* The resource pages have been assigned to the calling domain */
+#define _XENMEM_rsrc_acq_caller_owned 0
+#define XENMEM_rsrc_acq_caller_owned (1u << _XENMEM_rsrc_acq_caller_owned)
+
+ /*
+ * IN - the index of the initial frame to be mapped. This parameter
+ * is ignored if nr_frames is 0.
+ */
+ uint64_aligned_t frame;
+
+#define XENMEM_resource_ioreq_server_frame_bufioreq 0
+#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
+
+ /*
+ * IN/OUT - If the tools domain is PV then, upon return, frame_list
+ * will be populated with the MFNs of the resource.
+ * If the tools domain is HVM then it is expected that, on
+ * entry, frame_list will be populated with a list of GFNs
+ * that will be mapped to the MFNs of the resource.
+ * If -EIO is returned then the frame_list has only been
+ * partially mapped and it is up to the caller to unmap all
+ * the GFNs.
+ * This parameter may be NULL if nr_frames is 0.
+ */
+ XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
+};
+typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t);
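As an illustration only, a tools-domain caller might first probe the frame count of an ioreq server resource as sketched below; HYPERVISOR_memory_op is the usual OS-provided wrapper, and target_domid / ioreq_server_id are hypothetical values obtained elsewhere:

    struct xen_mem_acquire_resource acq = {
        .domid = target_domid,
        .type = XENMEM_resource_ioreq_server,
        .id = ioreq_server_id,
        .nr_frames = 0,       /* 0 with a NULL frame_list: query the maximum */
    };
    int rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &acq);
    /* On success acq.nr_frames holds the maximum frame count; a second
       call with nr_frames set and frame_list populated (e.g. starting at
       .frame = XENMEM_resource_ioreq_server_frame_bufioreq) performs the
       actual mapping. */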
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+/*
+ * XENMEM_get_vnumainfo used by guest to get
+ * vNUMA topology from hypervisor.
+ */
+#define XENMEM_get_vnumainfo 26
+
+/* vNUMA node memory ranges */
+struct xen_vmemrange {
+ uint64_t start, end;
+ unsigned int flags;
+ unsigned int nid;
+};
+typedef struct xen_vmemrange xen_vmemrange_t;
+DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);
+
+/*
+ * vNUMA topology specifies vNUMA node number, distance table,
+ * memory ranges and vcpu mapping provided for guests.
+ * The XENMEM_get_vnumainfo hypercall expects the guest to supply
+ * nr_vnodes, nr_vmemranges and nr_vcpus to indicate the sizes of the
+ * buffers it provides. After filling the guest structures, nr_vnodes,
+ * nr_vmemranges and nr_vcpus are copied back to the guest. If the supplied
+ * values were incorrect, the expected values of nr_vnodes, nr_vmemranges
+ * and nr_vcpus are returned to the guest instead.
+ */
+struct xen_vnuma_topology_info {
+ /* IN */
+ domid_t domid;
+ uint16_t pad;
+ /* IN/OUT */
+ unsigned int nr_vnodes;
+ unsigned int nr_vcpus;
+ unsigned int nr_vmemranges;
+ /* OUT */
+ union {
+ XEN_GUEST_HANDLE(uint) h;
+ uint64_t pad;
+ } vdistance;
+ union {
+ XEN_GUEST_HANDLE(uint) h;
+ uint64_t pad;
+ } vcpu_to_vnode;
+ union {
+ XEN_GUEST_HANDLE(xen_vmemrange_t) h;
+ uint64_t pad;
+ } vmemrange;
+};
+typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
+DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
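A hedged sketch of a guest querying its own vNUMA topology; the buffer sizes are arbitrary assumptions, and HYPERVISOR_memory_op / set_xen_guest_handle come from the guest OS rather than this header:

    unsigned int distance[8 * 8], vcpu_to_vnode[32];
    xen_vmemrange_t ranges[8];
    struct xen_vnuma_topology_info topo = {
        .domid = DOMID_SELF,
        .nr_vnodes = 8,
        .nr_vcpus = 32,
        .nr_vmemranges = 8,
    };
    int rc;

    set_xen_guest_handle(topo.vdistance.h, distance);
    set_xen_guest_handle(topo.vcpu_to_vnode.h, vcpu_to_vnode);
    set_xen_guest_handle(topo.vmemrange.h, ranges);
    rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &topo);
    /* On failure topo.nr_* are updated to the values Xen expected. */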
+
+/* Next available subop number is 29 */
+
#endif /* __XEN_PUBLIC_MEMORY_H__ */
/*
--- /dev/null
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2006, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_PHYSDEV_H__
+#define __XEN_PUBLIC_PHYSDEV_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ * int physdev_op(int cmd, void *args)
+ * @cmd == PHYSDEVOP_??? (physdev operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Notify end-of-interrupt (EOI) for the specified IRQ.
+ * @arg == pointer to physdev_eoi structure.
+ */
+#define PHYSDEVOP_eoi 12
+struct physdev_eoi {
+ /* IN */
+ uint32_t irq;
+};
+typedef struct physdev_eoi physdev_eoi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
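For illustration, notifying end-of-interrupt for a PIRQ might look like the following sketch; HYPERVISOR_physdev_op is the OS-provided hypercall wrapper and pirq is a hypothetical PIRQ number:

    struct physdev_eoi eoi = { .irq = pirq };
    (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);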
+
+/*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
+ * once the guest has used this function: the associated event channel is
+ * then unmasked automatically. The page registered is used as a bit
+ * array indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v1 17
+/*
+ * Register a shared page for the hypervisor to indicate whether the
+ * guest must issue PHYSDEVOP_eoi. This hypercall is very similar to
+ * PHYSDEVOP_pirq_eoi_gmfn_v1 but it doesn't change the semantics of
+ * PHYSDEVOP_eoi. The page registered is used as a bit array indexed by
+ * Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_gmfn_v2 28
+struct physdev_pirq_eoi_gmfn {
+ /* IN */
+ xen_pfn_t gmfn;
+};
+typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t);
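A sketch of registering the EOI indication page; eoi_bitmap_gfn is the guest frame number of a page the OS has set aside (how that GFN is obtained is OS-specific), and HYPERVISOR_physdev_op is assumed from the OS:

    struct physdev_pirq_eoi_gmfn arg = { .gmfn = eoi_bitmap_gfn };
    int rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &arg);
    /* Afterwards, bit PIRQ in the registered page indicates whether
       PHYSDEVOP_eoi must still be issued for that PIRQ. */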
+
+/*
+ * Query the status of an IRQ line.
+ * @arg == pointer to physdev_irq_status_query structure.
+ */
+#define PHYSDEVOP_irq_status_query 5
+struct physdev_irq_status_query {
+ /* IN */
+ uint32_t irq;
+ /* OUT */
+ uint32_t flags; /* XENIRQSTAT_* */
+};
+typedef struct physdev_irq_status_query physdev_irq_status_query_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
+
+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
+#define _XENIRQSTAT_needs_eoi (0)
+#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
+
+/* IRQ shared by multiple guests? */
+#define _XENIRQSTAT_shared (1)
+#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
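A brief sketch of checking whether a given IRQ needs explicit EOIs; irq is a hypothetical IRQ number and HYPERVISOR_physdev_op is assumed from the OS:

    int needs_eoi = 0;
    struct physdev_irq_status_query query = { .irq = irq };

    if ( HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &query) == 0 )
        needs_eoi = !!(query.flags & XENIRQSTAT_needs_eoi);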
+
+/*
+ * Set the current VCPU's I/O privilege level.
+ * @arg == pointer to physdev_set_iopl structure.
+ */
+#define PHYSDEVOP_set_iopl 6
+struct physdev_set_iopl {
+ /* IN */
+ uint32_t iopl;
+};
+typedef struct physdev_set_iopl physdev_set_iopl_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
+
+/*
+ * Set the current VCPU's I/O-port permissions bitmap.
+ * @arg == pointer to physdev_set_iobitmap structure.
+ */
+#define PHYSDEVOP_set_iobitmap 7
+struct physdev_set_iobitmap {
+ /* IN */
+#if __XEN_INTERFACE_VERSION__ >= 0x00030205
+ XEN_GUEST_HANDLE(uint8) bitmap;
+#else
+ uint8_t *bitmap;
+#endif
+ uint32_t nr_ports;
+};
+typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
+
+/*
+ * Read or write an IO-APIC register.
+ * @arg == pointer to physdev_apic structure.
+ */
+#define PHYSDEVOP_apic_read 8
+#define PHYSDEVOP_apic_write 9
+struct physdev_apic {
+ /* IN */
+ ULONG_PTR apic_physbase;
+ uint32_t reg;
+ /* IN or OUT */
+ uint32_t value;
+};
+typedef struct physdev_apic physdev_apic_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
+
+/*
+ * Allocate or free a physical upcall vector for the specified IRQ line.
+ * @arg == pointer to physdev_irq structure.
+ */
+#define PHYSDEVOP_alloc_irq_vector 10
+#define PHYSDEVOP_free_irq_vector 11
+struct physdev_irq {
+ /* IN */
+ uint32_t irq;
+ /* IN or OUT */
+ uint32_t vector;
+};
+typedef struct physdev_irq physdev_irq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
+
+#define MAP_PIRQ_TYPE_MSI 0x0
+#define MAP_PIRQ_TYPE_GSI 0x1
+#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+#define MAP_PIRQ_TYPE_MSI_SEG 0x3
+#define MAP_PIRQ_TYPE_MULTI_MSI 0x4
+
+#define PHYSDEVOP_map_pirq 13
+struct physdev_map_pirq {
+ domid_t domid;
+ /* IN */
+ int type;
+ /* IN (ignored for ..._MULTI_MSI) */
+ int index;
+ /* IN or OUT */
+ int pirq;
+ /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */
+ int bus;
+ /* IN */
+ int devfn;
+ /* IN (also OUT for ..._MULTI_MSI) */
+ int entry_nr;
+ /* IN */
+ uint64_t table_base;
+};
+typedef struct physdev_map_pirq physdev_map_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
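A hedged example of binding a GSI to a PIRQ, loosely following how PV guests commonly use this operation; gsi is a hypothetical GSI number and HYPERVISOR_physdev_op is the OS wrapper:

    struct physdev_map_pirq map = {
        .domid = DOMID_SELF,
        .type = MAP_PIRQ_TYPE_GSI,
        .index = gsi,
        .pirq = gsi,        /* requested PIRQ; IN/OUT */
    };
    int rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map);
    /* On success map.pirq holds the PIRQ actually bound to the GSI. */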
+
+#define PHYSDEVOP_unmap_pirq 14
+struct physdev_unmap_pirq {
+ domid_t domid;
+ /* IN */
+ int pirq;
+};
+
+typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
+
+#define PHYSDEVOP_manage_pci_add 15
+#define PHYSDEVOP_manage_pci_remove 16
+struct physdev_manage_pci {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+
+typedef struct physdev_manage_pci physdev_manage_pci_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
+
+#define PHYSDEVOP_restore_msi 19
+struct physdev_restore_msi {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_restore_msi physdev_restore_msi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t);
+
+#define PHYSDEVOP_manage_pci_add_ext 20
+struct physdev_manage_pci_ext {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+ unsigned is_extfn;
+ unsigned is_virtfn;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+};
+
+typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t);
+
+/*
+ * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
+ * hypercall since 0x00030202.
+ */
+struct physdev_op {
+ uint32_t cmd;
+ union {
+ struct physdev_irq_status_query irq_status_query;
+ struct physdev_set_iopl set_iopl;
+ struct physdev_set_iobitmap set_iobitmap;
+ struct physdev_apic apic_op;
+ struct physdev_irq irq_op;
+ } u;
+};
+typedef struct physdev_op physdev_op_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
+
+#define PHYSDEVOP_setup_gsi 21
+struct physdev_setup_gsi {
+ int gsi;
+ /* IN */
+ uint8_t triggering;
+ /* IN */
+ uint8_t polarity;
+ /* IN */
+};
+
+typedef struct physdev_setup_gsi physdev_setup_gsi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t);
+
+/* leave PHYSDEVOP 22 free */
+
+/* type is MAP_PIRQ_TYPE_GSI or MAP_PIRQ_TYPE_MSI
+ * the hypercall returns a free pirq */
+#define PHYSDEVOP_get_free_pirq 23
+struct physdev_get_free_pirq {
+ /* IN */
+ int type;
+ /* OUT */
+ uint32_t pirq;
+};
+
+typedef struct physdev_get_free_pirq physdev_get_free_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_get_free_pirq_t);
+
+#define XEN_PCI_MMCFG_RESERVED 0x1
+
+#define PHYSDEVOP_pci_mmcfg_reserved 24
+struct physdev_pci_mmcfg_reserved {
+ uint64_t address;
+ uint16_t segment;
+ uint8_t start_bus;
+ uint8_t end_bus;
+ uint32_t flags;
+};
+typedef struct physdev_pci_mmcfg_reserved physdev_pci_mmcfg_reserved_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_mmcfg_reserved_t);
+
+#define XEN_PCI_DEV_EXTFN 0x1
+#define XEN_PCI_DEV_VIRTFN 0x2
+#define XEN_PCI_DEV_PXM 0x4
+
+#define PHYSDEVOP_pci_device_add 25
+struct physdev_pci_device_add {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t flags;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+ /*
+ * Optional parameters array.
+ * First element ([0]) is PXM domain associated with the device (if
+ * XEN_PCI_DEV_PXM is set)
+ */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint32_t optarr[];
+#elif defined(__GNUC__)
+ uint32_t optarr[0];
+#endif
+};
+typedef struct physdev_pci_device_add physdev_pci_device_add_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t);
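As a sketch, dom0 reporting a newly discovered PCI device; the segment/bus/devfn values are made up, no XEN_PCI_DEV_* flags are set (so optarr is left empty), and HYPERVISOR_physdev_op is the OS wrapper:

    struct physdev_pci_device_add add = {
        .seg = 0,
        .bus = 0x03,
        .devfn = (0x00 << 3) | 0x0,   /* slot 0, function 0 */
        .flags = 0,
    };
    int rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);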
+
+#define PHYSDEVOP_pci_device_remove 26
+#define PHYSDEVOP_restore_msi_ext 27
+/*
+ * Dom0 should use these two to announce that MMIO resources assigned to
+ * MSI-X capable devices won't (prepare) or may (release) change.
+ */
+#define PHYSDEVOP_prepare_msix 30
+#define PHYSDEVOP_release_msix 31
+struct physdev_pci_device {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_pci_device physdev_pci_device_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t);
+
+#define PHYSDEVOP_DBGP_RESET_PREPARE 1
+#define PHYSDEVOP_DBGP_RESET_DONE 2
+
+#define PHYSDEVOP_DBGP_BUS_UNKNOWN 0
+#define PHYSDEVOP_DBGP_BUS_PCI 1
+
+#define PHYSDEVOP_dbgp_op 29
+struct physdev_dbgp_op {
+ /* IN */
+ uint8_t op;
+ uint8_t bus;
+ union {
+ struct physdev_pci_device pci;
+ } u;
+};
+typedef struct physdev_dbgp_op physdev_dbgp_op_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_t);
+
+/*
+ * Notify that some PIRQ-bound event channels have been unmasked.
+ * ** This command is obsolete since interface version 0x00030202 and is **
+ * ** unsupported by newer versions of Xen. **
+ */
+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
+/*
+ * These all-capitals physdev operation names are superseded by the new names
+ * (defined above) since interface version 0x00030202. The guard above was
+ * only added post-4.5, though, and hence shouldn't check for 0x00030202.
+ */
+#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
+#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
+#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
+#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
+#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
+#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
+#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
+#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
+#endif
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040200
+#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1
+#else
+#define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v2
+#endif
+
+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
* Halt execution of this domain (all VCPUs) and notify the system controller.
* @arg == pointer to sched_shutdown_t structure.
*
- * If the sched_shutdown_t reason is SHUTDOWN_suspend then this
- * hypercall takes an additional extra argument which should be the
- * MFN of the guest's start_info_t.
+ * If the sched_shutdown_t reason is SHUTDOWN_suspend then
+ * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
+ * of the guest's start info page. RDX/EDX is the third hypercall
+ * argument.
*
 * In addition, when the reason is SHUTDOWN_suspend, this hypercall
* returns 1 if suspend was cancelled or the domain was merely
* With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
*/
#define SCHEDOP_watchdog 6
+
+/*
+ * Override the current vcpu affinity by pinning it to one physical cpu or
+ * undo this override restoring the previous affinity.
+ * @arg == pointer to sched_pin_override_t structure.
+ *
+ * A negative pcpu value will undo a previous pin override and restore the
+ * previous cpu affinity.
+ * This call is allowed for the hardware domain only and requires the cpu
+ * to be part of the domain's cpupool.
+ */
+#define SCHEDOP_pin_override 7
/* ` } */
struct sched_shutdown {
typedef struct sched_watchdog sched_watchdog_t;
DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
+struct sched_pin_override {
+ int32_t pcpu;
+};
+typedef struct sched_pin_override sched_pin_override_t;
+DEFINE_XEN_GUEST_HANDLE(sched_pin_override_t);
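For illustration, the hardware domain might temporarily pin the current vCPU and later undo the override as sketched below; HYPERVISOR_sched_op is the OS wrapper and the pCPU number is arbitrary:

    int rc;
    struct sched_pin_override pin = { .pcpu = 3 };

    rc = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin);  /* pin to pCPU 3 */
    /* ... work that must stay on that physical CPU ... */
    pin.pcpu = -1;
    rc = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin);  /* restore affinity */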
+
/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
* software to determine the appropriate action. For the most part, Xen does
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
-#define SHUTDOWN_MAX 4 /* Maximum valid shutdown reason. */
+
+/*
+ * The domain has asked for a 'soft reset' to be performed on it. The expected
+ * behavior is to reset internal Xen state for the domain, returning it to the
+ * point where it was created, but leaving the domain's memory contents and
+ * vCPU contexts intact. This allows the domain to start over and set up all
+ * Xen-specific interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
+#define SHUTDOWN_MAX 5 /* Maximum valid shutdown reason. */
/* ` } */
#endif /* __XEN_PUBLIC_SCHED_H__ */
#define TRC_SUBCLS_SHIFT 12
/* trace subclasses for SVM */
-#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
-#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
+#define TRC_HVM_EMUL 0x00084000 /* emulated devices */
#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
#define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */
/* Per-scheduler IDs, to identify scheduler specific events */
#define TRC_SCHED_CSCHED 0
#define TRC_SCHED_CSCHED2 1
-#define TRC_SCHED_SEDF 2
+/* #define XEN_SCHEDULER_SEDF 2 (Removed) */
#define TRC_SCHED_ARINC653 3
+#define TRC_SCHED_RTDS 4
+#define TRC_SCHED_SNULL 5
/* Per-scheduler tracing */
#define TRC_SCHED_CLASS_EVT(_c, _e) \
((TRC_SCHED_##_c << TRC_SCHED_ID_SHIFT) & TRC_SCHED_ID_MASK) ) + \
(_e & TRC_SCHED_EVT_MASK) )
+/* Trace classes for DOM0 operations */
+#define TRC_DOM0_DOMOPS 0x00041000 /* Domains manipulations */
+
/* Trace classes for Hardware */
#define TRC_HW_PM 0x00801000 /* Power management traces */
#define TRC_HW_IRQ 0x00802000 /* Traces relating to the handling of IRQs */
#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14)
#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
#define TRC_SCHED_SHUTDOWN_CODE (TRC_SCHED_VERBOSE + 16)
+#define TRC_SCHED_SWITCH_INFCONT (TRC_SCHED_VERBOSE + 17)
+
+#define TRC_DOM0_DOM_ADD (TRC_DOM0_DOMOPS + 1)
+#define TRC_DOM0_DOM_REM (TRC_DOM0_DOMOPS + 2)
#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
#define TRC_HVM_TRAP (TRC_HVM_HANDLER + 0x23)
#define TRC_HVM_TRAP_DEBUG (TRC_HVM_HANDLER + 0x24)
#define TRC_HVM_VLAPIC (TRC_HVM_HANDLER + 0x25)
+#define TRC_HVM_XCR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x26)
+#define TRC_HVM_XCR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x27)
#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
+/* Trace events for emulated devices */
+#define TRC_HVM_EMUL_HPET_START_TIMER (TRC_HVM_EMUL + 0x1)
+#define TRC_HVM_EMUL_PIT_START_TIMER (TRC_HVM_EMUL + 0x2)
+#define TRC_HVM_EMUL_RTC_START_TIMER (TRC_HVM_EMUL + 0x3)
+#define TRC_HVM_EMUL_LAPIC_START_TIMER (TRC_HVM_EMUL + 0x4)
+#define TRC_HVM_EMUL_HPET_STOP_TIMER (TRC_HVM_EMUL + 0x5)
+#define TRC_HVM_EMUL_PIT_STOP_TIMER (TRC_HVM_EMUL + 0x6)
+#define TRC_HVM_EMUL_RTC_STOP_TIMER (TRC_HVM_EMUL + 0x7)
+#define TRC_HVM_EMUL_LAPIC_STOP_TIMER (TRC_HVM_EMUL + 0x8)
+#define TRC_HVM_EMUL_PIT_TIMER_CB (TRC_HVM_EMUL + 0x9)
+#define TRC_HVM_EMUL_LAPIC_TIMER_CB (TRC_HVM_EMUL + 0xA)
+#define TRC_HVM_EMUL_PIC_INT_OUTPUT (TRC_HVM_EMUL + 0xB)
+#define TRC_HVM_EMUL_PIC_KICK (TRC_HVM_EMUL + 0xC)
+#define TRC_HVM_EMUL_PIC_INTACK (TRC_HVM_EMUL + 0xD)
+#define TRC_HVM_EMUL_PIC_POSEDGE (TRC_HVM_EMUL + 0xE)
+#define TRC_HVM_EMUL_PIC_NEGEDGE (TRC_HVM_EMUL + 0xF)
+#define TRC_HVM_EMUL_PIC_PEND_IRQ_CALL (TRC_HVM_EMUL + 0x10)
+#define TRC_HVM_EMUL_LAPIC_PIC_INTR (TRC_HVM_EMUL + 0x11)
+
/* trace events for per class */
#define TRC_PM_FREQ_CHANGE (TRC_HW_PM + 0x01)
#define TRC_PM_IDLE_ENTRY (TRC_HW_PM + 0x02)
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040300
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040900
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
DEFINE_XEN_GUEST_HANDLE(uint64_t);
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
+
+/*
+ * Turn a plain number into a C unsigned, unsigned long, or unsigned long
+ * long constant.
+ */
+#define __xen_mk_uint(x) x ## U
+#define __xen_mk_ulong(x) x ## UL
+#ifndef __xen_mk_ullong
+# define __xen_mk_ullong(x) x ## ULL
+#endif
+#define xen_mk_uint(x) __xen_mk_uint(x)
+#define xen_mk_ulong(x) __xen_mk_ulong(x)
+#define xen_mk_ullong(x) __xen_mk_ullong(x)
+
+#else
+
+/* In assembly code we cannot use C numeric constant suffixes. */
+#define xen_mk_uint(x) x
+#define xen_mk_ulong(x) x
+#define xen_mk_ullong(x) x
+
#endif
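The point of these wrappers is that a single definition works from both C and assembly sources; for example (EXAMPLE_WIDE_FLAG is a hypothetical name, not part of the interface):

    #define EXAMPLE_WIDE_FLAG (xen_mk_ullong(1) << 40)
    /* expands to (1ULL << 40) when compiled as C, and to (1 << 40) in
       assembly files, where the ULL suffix would not be accepted */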
/*
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
+#define __HYPERVISOR_xenpmu_op 40
+#define __HYPERVISOR_dm_op 41
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
+#define VIRQ_XENPMU 13 /* V. PMC interrupt */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
* As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
* with those in @val.
*
+ * ptr[1:0] == MMU_PT_UPDATE_NO_TRANSLATE:
+ * As MMU_NORMAL_PT_UPDATE above, but @val is not translated through FD
+ * page tables.
+ *
* @val is usually the machine frame number along with some attributes.
* The attributes by default follow the architecture defined bits. Meaning that
* if this is a X86_64 machine and four page table layout is used, the layout
*
* PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
*/
-#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
-#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
-#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
+#define MMU_PT_UPDATE_NO_TRANSLATE 3 /* checked '*ptr = val'. ptr is MA. */
+ /* val never translated. */
/*
* MMU EXTENDED OPERATIONS
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
/* ` enum uvm_flags { */
-#define UVMF_NONE (0UL<<0) /* No flushing at all. */
-#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
-#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
-#define UVMF_FLUSHTYPE_MASK (3UL<<0)
-#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
-#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
-#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
+#define UVMF_NONE (xen_mk_ulong(0)<<0) /* No flushing at all. */
+#define UVMF_TLB_FLUSH (xen_mk_ulong(1)<<0) /* Flush entire TLB(s). */
+#define UVMF_INVLPG (xen_mk_ulong(2)<<0) /* Flush only one entry. */
+#define UVMF_FLUSHTYPE_MASK (xen_mk_ulong(3)<<0)
+#define UVMF_MULTI (xen_mk_ulong(0)<<2) /* Flush subset of TLBs. */
+#define UVMF_LOCAL (xen_mk_ulong(0)<<2) /* Flush local TLB. */
+#define UVMF_ALL (xen_mk_ulong(1)<<2) /* Flush all TLBs. */
/* ` } */
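A short sketch of how these flags are typically passed to the OS-provided HYPERVISOR_update_va_mapping wrapper; va and new_pte are hypothetical values:

    /* Remap one virtual address and flush just that entry on the local CPU. */
    int rc = HYPERVISOR_update_va_mapping(va, new_pte,
                                          UVMF_INVLPG | UVMF_LOCAL);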
/*
/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
-#define MAX_VMASST_TYPE 3
+/*
+ * x86 guests: Sane behaviour for virtual iopl
+ * - virtual iopl updated from do_iret() hypercalls.
+ * - virtual iopl reported in bounce frames.
+ * - guest kernels assumed to be level 0 for the purpose of iopl checks.
+ */
+#define VMASST_TYPE_architectural_iopl 4
-#ifndef __ASSEMBLY__
+/*
+ * All guests: activate update indicator in vcpu_runstate_info
+ * Enable setting the XEN_RUNSTATE_UPDATE flag in guest memory mapped
+ * vcpu_runstate_info during updates of the runstate information.
+ */
+#define VMASST_TYPE_runstate_update_flag 5
-typedef uint16_t domid_t;
+/*
+ * x86/64 guests: strictly hide M2P from user mode.
+ * This allows the guest to control respective hypervisor behavior:
+ * - when not set, L4 tables get created with the respective slot blank,
+ * and whenever the L4 table gets used as a kernel one the missing
+ * mapping gets inserted,
+ * - when set, L4 tables get created with the respective slot initialized
+ * as before, and whenever the L4 table gets used as a user one the
+ * mapping gets zapped.
+ */
+#define VMASST_TYPE_m2p_strict 32
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040600
+#define MAX_VMASST_TYPE 3
+#endif
/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
-#define DOMID_FIRST_RESERVED (0x7FF0U)
+#define DOMID_FIRST_RESERVED xen_mk_uint(0x7FF0)
/* DOMID_SELF is used in certain contexts to refer to oneself. */
-#define DOMID_SELF (0x7FF0U)
+#define DOMID_SELF xen_mk_uint(0x7FF0)
/*
* DOMID_IO is used to restrict page-table updates to mapping I/O memory.
* is useful to ensure that no mappings to the OS's own heap are accidentally
* installed. (e.g., in Linux this could cause havoc as reference counts
* aren't adjusted on the I/O-mapping code path).
- * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
- * be specified by any calling domain.
+ * This only makes sense as HYPERVISOR_mmu_update()'s and
+ * HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument. For
+ * HYPERVISOR_mmu_update() context it can be specified by any calling domain,
+ * otherwise it's only permitted if the caller is privileged.
*/
-#define DOMID_IO (0x7FF1U)
+#define DOMID_IO xen_mk_uint(0x7FF1)
/*
* DOMID_XEN is used to allow privileged domains to map restricted parts of
* Xen's heap space (e.g., the machine_to_phys table).
- * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
- * the caller is privileged.
+ * This only makes sense as
+ * - HYPERVISOR_mmu_update()'s, HYPERVISOR_mmuext_op()'s, or
+ * HYPERVISOR_update_va_mapping_otherdomain()'s "foreigndom" argument,
+ * - with XENMAPSPACE_gmfn_foreign,
+ * and is only permitted if the caller is privileged.
*/
-#define DOMID_XEN (0x7FF2U)
+#define DOMID_XEN xen_mk_uint(0x7FF2)
/*
* DOMID_COW is used as the owner of sharable pages */
-#define DOMID_COW (0x7FF3U)
+#define DOMID_COW xen_mk_uint(0x7FF3)
/* DOMID_INVALID is used to identify pages with unknown owner. */
-#define DOMID_INVALID (0x7FF4U)
+#define DOMID_INVALID xen_mk_uint(0x7FF4)
/* Idle domain. */
-#define DOMID_IDLE (0x7FFFU)
+#define DOMID_IDLE xen_mk_uint(0x7FFF)
+
+#ifndef __ASSEMBLY__
+
+typedef uint16_t domid_t;
/*
* Send an array of these to HYPERVISOR_mmu_update().
/*
* ` enum neg_errnoval
* ` HYPERVISOR_multicall(multicall_entry_t call_list[],
- * ` unsigned int nr_calls);
+ * ` uint32_t nr_calls);
*
- * NB. The fields are natural register size for this architecture.
+ * NB. The fields are logically the natural register size for this
+ * architecture. In cases where xen_ulong_t is larger than this then
+ * any unused bits in the upper portion must be zero.
*/
struct multicall_entry {
- ULONG_PTR op, result;
- ULONG_PTR args[6];
+ xen_ulong_t op, result;
+ xen_ulong_t args[6];
};
typedef struct multicall_entry multicall_entry_t;
DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
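A minimal sketch of batching two hypercalls in one multicall; __HYPERVISOR_xen_version is the standard hypercall number from this header, XENVER_version (0) lives in version.h outside this excerpt, and HYPERVISOR_multicall is the OS wrapper:

    multicall_entry_t calls[2] = {
        { .op = __HYPERVISOR_xen_version, .args = { 0 /* XENVER_version */ } },
        { .op = __HYPERVISOR_xen_version, .args = { 0 /* XENVER_version */ } },
    };
    int rc = HYPERVISOR_multicall(calls, 2);
    /* Each calls[i].result then holds the return value of that sub-call. */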
+#if __XEN_INTERFACE_VERSION__ < 0x00040400
/*
- * Event channel endpoints per domain:
+ * Event channel endpoints per domain (when using the 2-level ABI):
* 1024 if a LONG_PTR is 32 bits; 4096 if a LONG_PTR is 64 bits.
*/
-#define NR_EVENT_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
+#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS
+#endif
struct vcpu_time_info {
/*
*/
uint32_t tsc_to_system_mul;
int8_t tsc_shift;
+#if __XEN_INTERFACE_VERSION__ > 0x040600
+ uint8_t flags;
+ uint8_t pad1[2];
+#else
int8_t pad1[3];
+#endif
}; /* 32 bytes */
typedef struct vcpu_time_info vcpu_time_info_t;
+#define XEN_PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define XEN_PVCLOCK_GUEST_STOPPED (1 << 1)
+
struct vcpu_info {
/*
* 'evtchn_upcall_pending' is written non-zero by Xen to indicate
uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
+#if !defined(__i386__)
+ uint32_t wc_sec_hi;
+# define xen_wc_sec_hi wc_sec_hi
+#elif !defined(__XEN__) && !defined(__XEN_TOOLS__)
+# define xen_wc_sec_hi arch.wc_sec_hi
+#endif
struct arch_shared_info arch;
* 3. This the order of bootstrap elements in the initial virtual region:
* a. relocated kernel image
* b. initial ram disk [mod_start, mod_len]
+ * (may be omitted)
* c. list of allocated page frames [mfn_list, nr_pages]
* (unless relocated due to XEN_ELFNOTE_INIT_P2M)
- * d. start_info_t structure [register ESI (x86)]
- * e. bootstrap page tables [pt_base and CR3 (x86)]
- * f. bootstrap stack [register ESP (x86)]
+ * d. start_info_t structure [register rSI (x86)]
+ * in case of dom0 this page contains the console info, too
+ * e. unless dom0: xenstore ring page
+ * f. unless dom0: console ring page
+ * g. bootstrap page tables [pt_base and CR3 (x86)]
+ * h. bootstrap stack [register ESP (x86)]
* 4. Bootstrap elements are packed together, but each is 4kB-aligned.
- * 5. The initial ram disk may be omitted.
- * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * 5. The list of page frames forms a contiguous 'pseudo-physical' memory
* layout for the domain. In particular, the bootstrap virtual-memory
* region is a 1:1 mapping to the first section of the pseudo-physical map.
- * 7. All bootstrap elements are mapped read-writable for the guest OS. The
+ * 6. All bootstrap elements are mapped read-writable for the guest OS. The
* only exception is the bootstrap page table, which is mapped read-only.
- * 8. There is guaranteed to be at least 512kB padding after the final
+ * 7. There is guaranteed to be at least 512kB padding after the final
* bootstrap element. If necessary, the bootstrap virtual region is
* extended by an extra 4MB to ensure this.
*
* Note: Prior to 25833:bb85bbccb1c9. ("x86/32-on-64 adjust Dom0 initial page
- * table layout") a bug caused the pt_base (3.e above) and cr3 to not point
+ * table layout") a bug caused the pt_base (3.g above) and cr3 to not point
* to the start of the guest page tables (it was offset by two pages).
* This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU
* or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got
#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
+ /* P->M making the 3 level tree obsolete? */
#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
/*
typedef uint8_t xen_domain_handle_t[16];
-/* Turn a plain number into a C ULONG_PTR constant. */
-#define __mk_unsigned_long(x) x ## UL
-#define mk_unsigned_long(x) __mk_unsigned_long(x)
-
__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
-#else /* __ASSEMBLY__ */
+typedef struct {
+ uint8_t a[16];
+} xen_uuid_t;
-/* In assembly code we cannot use C numeric constant suffixes. */
-#define mk_unsigned_long(x) x
+/*
+ * XEN_DEFINE_UUID(0x00112233, 0x4455, 0x6677, 0x8899,
+ * 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)
+ * will construct UUID 00112233-4455-6677-8899-aabbccddeeff presented as
+ * {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ * 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};
+ *
+ * NB: This is compatible with Linux kernel and with libuuid, but it is not
+ * compatible with Microsoft, as they use mixed-endian encoding (some
+ * components are little-endian, some are big-endian).
+ */
+#define XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ {{((a) >> 24) & 0xFF, ((a) >> 16) & 0xFF, \
+ ((a) >> 8) & 0xFF, ((a) >> 0) & 0xFF, \
+ ((b) >> 8) & 0xFF, ((b) >> 0) & 0xFF, \
+ ((c) >> 8) & 0xFF, ((c) >> 0) & 0xFF, \
+ ((d) >> 8) & 0xFF, ((d) >> 0) & 0xFF, \
+ e1, e2, e3, e4, e5, e6}}
+
+#if defined(__STDC_VERSION__) ? __STDC_VERSION__ >= 199901L : defined(__GNUC__)
+#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ ((xen_uuid_t)XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6))
+#else
+#define XEN_DEFINE_UUID(a, b, c, d, e1, e2, e3, e4, e5, e6) \
+ XEN_DEFINE_UUID_(a, b, c, d, e1, e2, e3, e4, e5, e6)
+#endif /* __STDC_VERSION__ / __GNUC__ */
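For example, a consumer of this header could build a UUID value as follows (example_uuid is a hypothetical name):

    xen_uuid_t example_uuid =
        XEN_DEFINE_UUID(0x00112233, 0x4455, 0x6677, 0x8899,
                        0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff);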
#endif /* !__ASSEMBLY__ */
/* Default definitions for macros used by domctl/sysctl. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#ifndef int64_aligned_t
+#define int64_aligned_t int64_t
+#endif
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif
#include <stdlib.h>
-#define DOMID_INVALID (0x7FF4U)
// States in XenStore (Note - numbers must match!)
typedef enum _XENBUS_STATE {
XENBUS_STATE_INVALID = 0, // 0
PXENBUS_STORE_WATCH BackendWatch;
};
-#define DOMID_INVALID (0x7FF4U)
-
static const PCHAR
__XenvbdStateName(
IN XENVBD_STATE State