#ifndef __XEN_PUBLIC_ARCH_PPC_64_H__
#define __XEN_PUBLIC_ARCH_PPC_64_H__
-#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { \
int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \
type *p; \
} __attribute__((__aligned__(8))) __guest_handle_ ## name
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ ___DEFINE_XEN_GUEST_HANDLE(name, type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define set_xen_guest_handle(hnd, val) \
DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
+/*
+ * MTRR MSRs
+ */
+
+struct hvm_hw_mtrr {
+#define MTRR_VCNT 8
+#define NUM_FIXED_MSR 11
+ uint64_t msr_pat_cr;
+ /* MTRR physbase & physmask MSR pairs */
+ uint64_t msr_mtrr_var[MTRR_VCNT*2];
+ uint64_t msr_mtrr_fixed[NUM_FIXED_MSR];
+ uint64_t msr_mtrr_cap;
+ uint64_t msr_mtrr_def_type;
+};
+
+DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
+
/*
* Largest type-code in use
*/
-#define HVM_SAVE_CODE_MAX 13
+#define HVM_SAVE_CODE_MAX 14
#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
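/*
 * Illustrative sketch, not part of this patch: how a tools-side consumer
 * might print the new MTRR record once it has been located in a saved HVM
 * context blob (record lookup and hypercall plumbing are omitted). Note
 * that msr_mtrr_var[] interleaves physbase/physmask pairs.
 */
#include <stdio.h>
#include <inttypes.h>

static void dump_hvm_mtrr_record(const struct hvm_hw_mtrr *m)
{
    unsigned int i;

    printf("PAT      %#018" PRIx64 "\n", m->msr_pat_cr);
    printf("MTRRcap  %#018" PRIx64 "\n", m->msr_mtrr_cap);
    printf("defType  %#018" PRIx64 "\n", m->msr_mtrr_def_type);
    for ( i = 0; i < MTRR_VCNT; i++ )
        printf("var%u    base %#018" PRIx64 " mask %#018" PRIx64 "\n",
               i, m->msr_mtrr_var[2 * i], m->msr_mtrr_var[2 * i + 1]);
    for ( i = 0; i < NUM_FIXED_MSR; i++ )
        printf("fixed%-2u %#018" PRIx64 "\n", i, m->msr_mtrr_fixed[i]);
}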
/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
-#undef __DEFINE_XEN_GUEST_HANDLE
-#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+#undef ___DEFINE_XEN_GUEST_HANDLE
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } \
__guest_handle_ ## name; \
typedef struct { union { type *p; uint64_aligned_t q; }; } \
/* Structural guest handles introduced in 0x00030201. */
#if __XEN_INTERFACE_VERSION__ >= 0x00030201
-#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name
#else
-#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
typedef type * __guest_handle_ ## name
#endif
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ ___DEFINE_XEN_GUEST_HANDLE(name, type)
#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
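/*
 * Illustrative only, not part of this patch: the new ___/__ split is the
 * standard two-step expansion idiom, so a handle name that is itself a
 * #define is macro-expanded before '##' pastes it into the typedef name.
 * The names below are made up purely to show the effect.
 */
#include <stdint.h>

typedef uint8_t my_elem_t;                      /* made-up element type */
#define my_alias my_elem_t                      /* made-up alias macro */
__DEFINE_XEN_GUEST_HANDLE(my_alias, my_alias);  /* defines __guest_handle_my_elem_t */
XEN_GUEST_HANDLE(my_alias) example_handle;      /* resolves to the same expanded name */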
#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
struct xenctl_cpumap {
- XEN_GUEST_HANDLE_64(uint8_t) bitmap;
+ XEN_GUEST_HANDLE_64(uint8) bitmap;
uint32_t nr_cpus;
};
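/*
 * Illustrative only, not part of this patch: the handle is now spelled with
 * the unsuffixed name (uint8), matching the uint8/uint32/uint64 guest-handle
 * definitions in xen.h, but callers still hand it a uint8_t buffer. Locking
 * the buffer for the hypercall and error handling are omitted.
 */
#include <stdint.h>

static void fill_cpumap(struct xenctl_cpumap *map,
                        uint8_t *bits, uint32_t nr_cpus)
{
    set_xen_guest_handle(map->bitmap, bits);   /* point the guest handle at the buffer */
    map->nr_cpus = nr_cpus;                    /* number of valid bits in the bitmap */
}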
uint64_aligned_t max_pfns;
/* Start index in guest's page list. */
uint64_aligned_t start_pfn;
- XEN_GUEST_HANDLE_64(uint64_t) buffer;
+ XEN_GUEST_HANDLE_64(uint64) buffer;
/* OUT variables. */
uint64_aligned_t num_pfns;
};
/* IN variables. */
uint64_aligned_t num;
/* IN/OUT variables. */
- XEN_GUEST_HANDLE_64(uint32_t) array;
+ XEN_GUEST_HANDLE_64(uint32) array;
};
typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
uint32_t mb; /* Shadow memory allocation in MB */
/* OP_PEEK / OP_CLEAN */
- XEN_GUEST_HANDLE_64(uint8_t) dirty_bitmap;
+ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
struct xen_domctl_shadow_op_stats stats;
};
#define XEN_DOMCTL_sethvmcontext 34
typedef struct xen_domctl_hvmcontext {
uint32_t size; /* IN/OUT: size of buffer / bytes filled */
- XEN_GUEST_HANDLE_64(uint8_t) buffer; /* IN/OUT: data, or call
- * gethvmcontext with NULL
- * buffer to get size
- * req'd */
+ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
+ * gethvmcontext with NULL
+ * buffer to get size req'd */
} xen_domctl_hvmcontext_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
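/*
 * Illustrative sketch, not part of this patch: the size-query-then-fetch
 * pattern described in the comment above, via the libxenctrl wrapper
 * xc_domain_hvm_getcontext(). The int-typed xc handle and the wrapper's
 * exact signature are assumptions about the libxc of this era; error
 * handling is reduced to the minimum.
 */
#include <stdlib.h>
#include <stdint.h>
#include <xenctrl.h>

static uint8_t *fetch_hvm_context(int xc_handle, uint32_t dom, uint32_t *len)
{
    int sz = xc_domain_hvm_getcontext(xc_handle, dom, NULL, 0); /* size query */
    uint8_t *buf;

    if ( sz <= 0 || (buf = malloc(sz)) == NULL )
        return NULL;
    if ( xc_domain_hvm_getcontext(xc_handle, dom, buf, sz) < 0 ) /* real fetch */
    {
        free(buf);
        return NULL;
    }
    *len = sz;
    return buf;
}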
/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
-#define XEN_DOMCTL_assign_device 37
-#define DPCI_ADD_MAPPING 1
-#define DPCI_REMOVE_MAPPING 0
+#define XEN_DOMCTL_assign_device 37
+#define XEN_DOMCTL_test_assign_device 45
struct xen_domctl_assign_device {
uint32_t machine_bdf; /* machine PCI ID of assigned device */
};
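/*
 * Illustrative only, not part of this patch: machine_bdf is assumed here to
 * carry the conventional PCI encoding, (bus << 8) | (device << 3) | function,
 * as formed by the toolstack; treat this helper as an assumption rather than
 * a definition taken from the header.
 */
#include <stdint.h>

static uint32_t pci_to_machine_bdf(unsigned int bus, unsigned int dev,
                                   unsigned int fn)
{
    return ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
}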
/* Bind machine I/O address range -> HVM address range. */
#define XEN_DOMCTL_memory_mapping 39
+#define DPCI_ADD_MAPPING 1
+#define DPCI_REMOVE_MAPPING 0
struct xen_domctl_memory_mapping {
uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
uint64_aligned_t first_mfn; /* first page (machine page) in range */
*/
#define XEN_DOMCTL_set_opt_feature 44
struct xen_domctl_set_opt_feature {
-#ifdef __ia64__
+#if defined(__ia64__)
struct xen_ia64_opt_feature optf;
+#else
+ /* Make struct non-empty: do not depend on this field name! */
+ uint64_t dummy;
#endif
};
typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t;
#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
-#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */
+#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
+#define GNTST_address_too_big (-11) /* transfer page address too large. */
#define GNTTABOP_error_msgs { \
"okay", \
"no spare translation slot in the I/O MMU", \
"permission denied", \
"bad page", \
- "copy arguments cross page boundary" \
+ "copy arguments cross page boundary", \
+ "page address size too large" \
}
#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
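/*
 * Illustrative only, not part of this patch: GNTTABOP_error_msgs is indexed
 * by the negated GNTST_* value, so the new -11 code must stay in step with
 * the new string. A lookup helper might read:
 */
#include <stdint.h>

static const char *gntst_to_string(int16_t status)
{
    static const char *const msgs[] = GNTTABOP_error_msgs;

    if ( status > 0 ||
         (unsigned int)-status >= sizeof(msgs) / sizeof(msgs[0]) )
        return "bad status code";
    return msgs[-status];
}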
* of the real data to use. */
uint8_t dir:1; /* 1=read, 0=write */
uint8_t df:1;
+ uint8_t pad:1;
uint8_t type; /* I/O type */
uint8_t _pad0[6];
uint64_t io_count; /* How many IO done on a vcpu */
};
typedef struct shared_iopage shared_iopage_t;
-#define IOREQ_BUFFER_SLOT_NUM 80
+struct buf_ioreq {
+ uint8_t type; /* I/O type */
+ uint8_t pad:1;
+ uint8_t dir:1; /* 1=read, 0=write */
+ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */
+ uint32_t addr:20;/* physical address */
+ uint32_t data; /* data */
+};
+typedef struct buf_ioreq buf_ioreq_t;
+
+#define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */
struct buffered_iopage {
- unsigned int read_pointer;
- unsigned int write_pointer;
- ioreq_t ioreq[IOREQ_BUFFER_SLOT_NUM];
+ unsigned int read_pointer;
+ unsigned int write_pointer;
+ buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
}; /* NB. Size of this structure must be no greater than one page. */
typedef struct buffered_iopage buffered_iopage_t;
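/*
 * Illustrative sketch, not part of this patch: how a producer might queue a
 * buffered request, splitting an 8-byte value (size field == 3) across two
 * consecutive slots as the struct comment above describes. Memory barriers,
 * overflow checks against read_pointer, and the consumer side are omitted.
 */
#include <stdint.h>

static void buf_ioreq_put(struct buffered_iopage *pg, uint8_t type,
                          uint32_t addr, uint64_t data, uint8_t size)
{
    struct buf_ioreq *bp =
        &pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM];

    bp->type = type;
    bp->dir  = 0;                      /* 0 = write */
    bp->size = size;                   /* 0=>1, 1=>2, 2=>4, 3=>8 bytes */
    bp->addr = addr;                   /* low 20 bits of the address */
    bp->data = (uint32_t)data;
    if ( size == 3 )                   /* 8-byte access: next slot holds the high half */
    {
        pg->buf_ioreq[(pg->write_pointer + 1) % IOREQ_BUFFER_SLOT_NUM].data =
            (uint32_t)(data >> 32);
        pg->write_pointer++;
    }
    pg->write_pointer++;               /* publish (a write barrier belongs before this) */
}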
#ifdef __ia64__
#define HVM_PARAM_NVRAM_FD 7
#define HVM_PARAM_VHPT_SIZE 8
-#define HVM_NR_PARAMS 9
-#else
-#define HVM_NR_PARAMS 7
+#define HVM_PARAM_BUFPIOREQ_PFN 9
#endif
+/*
+ * Set mode for virtual timers (currently x86 only):
+ *  delay_for_missed_ticks (default):
+ *   Do not advance a vcpu's time beyond the correct delivery time for
+ *   interrupts that have been missed due to preemption. Deliver missed
+ *   interrupts when the vcpu is rescheduled and advance the vcpu's virtual
+ *   time stepwise for each one.
+ *  no_delay_for_missed_ticks:
+ *   As above, missed interrupts are delivered, but guest time always tracks
+ *   wallclock (i.e., real) time while doing so.
+ *  no_missed_ticks_pending:
+ *   No missed interrupts are held pending. Instead, to ensure ticks are
+ *   delivered at some non-zero rate, once missed ticks have been detected
+ *   the internal tick alarm is left enabled even if the VCPU is preempted
+ *   during the next tick period.
+ *  one_missed_tick_pending:
+ *   Missed interrupts are collapsed together and delivered as one 'late tick'.
+ *   Guest time always tracks wallclock (i.e., real) time.
+ */
+#define HVM_PARAM_TIMER_MODE 10
+#define HVMPTM_delay_for_missed_ticks 0
+#define HVMPTM_no_delay_for_missed_ticks 1
+#define HVMPTM_no_missed_ticks_pending 2
+#define HVMPTM_one_missed_tick_pending 3
+
+#define HVM_NR_PARAMS 11
+
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
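/*
 * Illustrative sketch, not part of this patch: a toolstack selects a timer
 * mode with the HVMOP_set_param hypercall (public/hvm/hvm_op.h). Only the
 * argument structure is filled in here; issuing the hypercall through
 * privcmd/libxenctrl is omitted.
 */
#include <string.h>
#include <stdint.h>
#include <xen/hvm/hvm_op.h>     /* HVMOP_set_param, struct xen_hvm_param */
#include <xen/hvm/params.h>     /* HVM_PARAM_TIMER_MODE, HVMPTM_* */

static void prep_timer_mode(struct xen_hvm_param *arg,
                            domid_t dom, uint64_t mode)
{
    memset(arg, 0, sizeof(*arg));
    arg->domid = dom;
    arg->index = HVM_PARAM_TIMER_MODE;
    arg->value = mode;          /* e.g. HVMPTM_one_missed_tick_pending */
}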
XENSTORE_RING_IDX rsp_cons, rsp_prod;
};
+/* Violating this is very bad. See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
+/* Violating these just gets you an error back */
+#define XENSTORE_ABS_PATH_MAX 3072
+#define XENSTORE_REL_PATH_MAX 2048
+
#endif /* _XS_WIRE_H */
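/*
 * Illustrative only, not part of this patch: how a client might validate a
 * node path against the new limits before building the wire message; the
 * payload limit is understood to apply to the message body, not to struct
 * xsd_sockmsg itself.
 */
#include <string.h>

static int xs_path_within_limits(const char *path)
{
    size_t limit = (path[0] == '/') ? XENSTORE_ABS_PATH_MAX
                                    : XENSTORE_REL_PATH_MAX;
    return strlen(path) <= limit;
}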
/*
#define PHYSDEVOP_set_iobitmap 7
struct physdev_set_iobitmap {
/* IN */
- XEN_GUEST_HANDLE_00030205(uint8_t) bitmap;
+ XEN_GUEST_HANDLE_00030205(uint8) bitmap;
uint32_t nr_ports;
};
typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
uint8_t capabilities;
uint8_t edid_transfer_time;
/* must refer to 128-byte buffer */
- XEN_GUEST_HANDLE(uint8_t) edid;
+ XEN_GUEST_HANDLE(uint8) edid;
} vbeddc_info; /* XEN_FW_VBEDDC_INFO */
} u;
};
struct xenpf_getidletime {
/* IN/OUT variables */
/* IN: CPUs to interrogate; OUT: subset of IN which are present */
- XEN_GUEST_HANDLE(uint8_t) cpumap_bitmap;
+ XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
/* IN variables */
/* Size of cpumap bitmap. */
uint32_t cpumap_nr_cpus;
/* Must be indexable for every cpu in cpumap_bitmap. */
- XEN_GUEST_HANDLE(uint64_t) idletime;
+ XEN_GUEST_HANDLE(uint64) idletime;
/* OUT variables */
/* System time when the idletime snapshots were taken. */
uint64_t now;
* If the actual @max_cpu_id is smaller than the array then the trailing
* elements of the array will not be written by the sysctl.
*/
- XEN_GUEST_HANDLE_64(uint32_t) cpu_to_node;
+ XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
};
typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
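/*
 * Illustrative sketch, not part of this patch: the caller provides the
 * cpu_to_node buffer and advertises its size through max_cpu_id (the field
 * the comment above refers to); on return the sysctl rewrites max_cpu_id
 * with the real maximum CPU id. Issuing the sysctl itself is omitted.
 */
#include <stdint.h>

static void prep_cpu_to_node(struct xen_sysctl_physinfo *pi,
                             uint32_t *node_map, uint32_t nr_entries)
{
    pi->max_cpu_id = nr_entries - 1;                 /* highest index we can accept */
    set_xen_guest_handle(pi->cpu_to_node, node_map);
}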