struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct vcpu *curr = current;
+ pagefault_info_t pfinfo;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
pfec |= PFEC_user_mode;
rc = ((access_type == hvm_access_insn_fetch) ?
- hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec) :
- hvm_copy_from_guest_virt(p_data, addr, bytes, pfec));
+ hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo) :
+ hvm_copy_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo));
switch ( rc )
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
struct vcpu *curr = current;
+ pagefault_info_t pfinfo;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
(hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
pfec |= PFEC_user_mode;
- rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
+ rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec, &pfinfo);
switch ( rc )
{
struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
bool_t otd_writable, ntd_writable;
unsigned long eflags;
+ pagefault_info_t pfinfo;
int exn_raised, rc;
struct {
u16 back_link,__blh;
}
rc = hvm_copy_from_guest_virt(
- &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
+ &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
if ( rc != HVMCOPY_okay )
goto out;
&tss.eip,
offsetof(typeof(tss), trace) -
offsetof(typeof(tss), eip),
- PFEC_page_present);
+ PFEC_page_present, &pfinfo);
if ( rc != HVMCOPY_okay )
goto out;
rc = hvm_copy_from_guest_virt(
- &tss, tr.base, sizeof(tss), PFEC_page_present);
+ &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
/*
* Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
* functions knew we want RO access.
tss.back_link = prev_tr.sel;
rc = hvm_copy_to_guest_virt(tr.base + offsetof(typeof(tss), back_link),
- &tss.back_link, sizeof(tss.back_link), 0);
+ &tss.back_link, sizeof(tss.back_link), 0,
+ &pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
else if ( rc != HVMCOPY_okay )
16 << segr.attr.fields.db,
&linear_addr) )
{
- rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0);
+ rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0,
+ &pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
else if ( rc != HVMCOPY_okay )
#define HVMCOPY_phys (0u<<2)
#define HVMCOPY_virt (1u<<2)
static enum hvm_copy_result __hvm_copy(
- void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
+ void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
+ pagefault_info_t *pfinfo)
{
struct vcpu *curr = current;
unsigned long gfn;
if ( pfec & PFEC_page_shared )
return HVMCOPY_gfn_shared;
if ( flags & HVMCOPY_fault )
+ {
+ if ( pfinfo )
+ {
+ pfinfo->linear = addr;
+ pfinfo->ec = pfec;
+ }
+
hvm_inject_page_fault(pfec, addr);
+ }
return HVMCOPY_bad_gva_to_gfn;
}
gpa |= (paddr_t)gfn << PAGE_SHIFT;
{
return __hvm_copy(buf, paddr, size,
HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
- 0);
+ 0, NULL);
}
enum hvm_copy_result hvm_copy_from_guest_phys(
{
return __hvm_copy(buf, paddr, size,
HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
- 0);
+ 0, NULL);
}
enum hvm_copy_result hvm_copy_to_guest_virt(
- unsigned long vaddr, void *buf, int size, uint32_t pfec)
+ unsigned long vaddr, void *buf, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo)
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
- PFEC_page_present | PFEC_write_access | pfec);
+ PFEC_page_present | PFEC_write_access | pfec, pfinfo);
}
enum hvm_copy_result hvm_copy_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec)
+ void *buf, unsigned long vaddr, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo)
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
- PFEC_page_present | pfec);
+ PFEC_page_present | pfec, pfinfo);
}
enum hvm_copy_result hvm_fetch_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec)
+ void *buf, unsigned long vaddr, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo)
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
- PFEC_page_present | PFEC_insn_fetch | pfec);
+ PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
}
enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
- PFEC_page_present | PFEC_write_access | pfec);
+ PFEC_page_present | PFEC_write_access | pfec, NULL);
}
enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
- PFEC_page_present | pfec);
+ PFEC_page_present | pfec, NULL);
}
enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
{
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
- PFEC_page_present | PFEC_insn_fetch | pfec);
+ PFEC_page_present | PFEC_insn_fetch | pfec, NULL);
}
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
struct vcpu *v = current;
union vmx_inst_info info;
struct segment_register seg;
+ pagefault_info_t pfinfo;
unsigned long base, index, seg_base, disp, offset;
int scale, size;
goto gp_fault;
if ( poperandS != NULL &&
- hvm_copy_from_guest_virt(poperandS, base, size, 0)
+ hvm_copy_from_guest_virt(poperandS, base, size, 0, &pfinfo)
!= HVMCOPY_okay )
return X86EMUL_EXCEPTION;
decode->mem = base;
struct vcpu *v = current;
struct vmx_inst_decoded decode;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+ pagefault_info_t pfinfo;
unsigned long gpa = 0;
int rc;
gpa = nvcpu->nv_vvmcxaddr;
- rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0);
+ rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0, &pfinfo);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
{
struct vcpu *v = current;
struct vmx_inst_decoded decode;
+ pagefault_info_t pfinfo;
u64 value = 0;
int rc;
switch ( decode.type ) {
case VMX_INST_MEMREG_TYPE_MEMORY:
- rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0);
+ rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0, &pfinfo);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
break;
* HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
* ordinary machine memory.
* HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
- * mapping to a guest physical address. In this case
- * a page fault exception is automatically queued
- * for injection into the current HVM VCPU.
+ * mapping to a guest physical address. The
+ * pagefault_info_t structure will be filled in if
+ * provided, and a page fault exception is
+ * automatically queued for injection into the
+ * current HVM VCPU.
*/
+typedef struct pagefault_info
+{
+ unsigned long linear; /* Faulting linear address. */
+ int ec;               /* Page fault error code (PFEC_* flags). */
+} pagefault_info_t;
+
enum hvm_copy_result hvm_copy_to_guest_virt(
- unsigned long vaddr, void *buf, int size, uint32_t pfec);
+ unsigned long vaddr, void *buf, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo);
enum hvm_copy_result hvm_copy_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec);
+ void *buf, unsigned long vaddr, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo);
enum hvm_copy_result hvm_fetch_from_guest_virt(
- void *buf, unsigned long vaddr, int size, uint32_t pfec);
+ void *buf, unsigned long vaddr, int size, uint32_t pfec,
+ pagefault_info_t *pfinfo);
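
A minimal caller-side sketch of the extended API declared above. It assumes the Xen-internal declarations in this header plus the X86EMUL_* return values from Xen's x86_emulate framework (X86EMUL_EXCEPTION appears in the callers earlier in this patch); emulate_linear_read() is a hypothetical helper named purely for illustration and is not part of the patch. With this change, pfinfo is filled on HVMCOPY_bad_gva_to_gfn while the #PF is still queued automatically by __hvm_copy(); passing NULL, as the *_phys and *_nofault wrappers do, simply skips the fill.

static int emulate_linear_read(unsigned long vaddr, void *buf, int size,
                               uint32_t pfec)
{
    pagefault_info_t pfinfo;

    switch ( hvm_copy_from_guest_virt(buf, vaddr, size, pfec, &pfinfo) )
    {
    case HVMCOPY_okay:
        return X86EMUL_OKAY;

    case HVMCOPY_bad_gva_to_gfn:
        /*
         * pfinfo.linear and pfinfo.ec now describe the failed translation;
         * __hvm_copy() has already queued the #PF for injection.
         */
        return X86EMUL_EXCEPTION;

    default:
        /* Bad gfn->mfn translation, paged-out/shared gfn, etc. */
        return X86EMUL_UNHANDLEABLE;
    }
}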
/*
* As above (copy to/from a guest virtual address), but no fault is generated