case HVMCOPY_okay:
break;
case HVMCOPY_bad_gva_to_gfn:
+ x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
case HVMCOPY_bad_gfn_to_mfn:
if ( access_type == hvm_access_insn_fetch )
case HVMCOPY_okay:
break;
case HVMCOPY_bad_gva_to_gfn:
+ x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
case HVMCOPY_bad_gfn_to_mfn:
return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
rc = hvm_copy_from_guest_linear(
&tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMCOPY_okay )
goto out;
offsetof(typeof(tss), trace) -
offsetof(typeof(tss), eip),
PFEC_page_present, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMCOPY_okay )
goto out;
rc = hvm_copy_from_guest_linear(
&tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
/*
* Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
* functions knew we want RO access.
&tss.back_link, sizeof(tss.back_link), 0,
&pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
+ {
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
exn_raised = 1;
+ }
else if ( rc != HVMCOPY_okay )
goto out;
}
rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
&pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
+ {
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
exn_raised = 1;
+ }
else if ( rc != HVMCOPY_okay )
goto out;
}
{
pfinfo->linear = addr;
pfinfo->ec = pfec;
-
- hvm_inject_page_fault(pfec, addr);
}
return HVMCOPY_bad_gva_to_gfn;
}
struct vcpu *v = current;
union vmx_inst_info info;
struct segment_register seg;
- pagefault_info_t pfinfo;
unsigned long base, index, seg_base, disp, offset;
int scale, size;
offset + size - 1 > seg.limit) )
goto gp_fault;
- if ( poperandS != NULL &&
- hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
- != HVMCOPY_okay )
- return X86EMUL_EXCEPTION;
+ if ( poperandS != NULL )
+ {
+ pagefault_info_t pfinfo;
+ int rc = hvm_copy_from_guest_linear(poperandS, base, size,
+ 0, &pfinfo);
+
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
+ if ( rc != HVMCOPY_okay )
+ return X86EMUL_EXCEPTION;
+ }
decode->mem = base;
decode->len = size;
}
gpa = nvcpu->nv_vvmcxaddr;
rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
switch ( decode.type ) {
case VMX_INST_MEMREG_TYPE_MEMORY:
rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
break;
case HVMCOPY_okay:
return X86EMUL_OKAY;
case HVMCOPY_bad_gva_to_gfn:
+ x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &sh_ctxt->ctxt);
return X86EMUL_EXCEPTION;
case HVMCOPY_bad_gfn_to_mfn:
case HVMCOPY_unhandleable:
* HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
* mapping to a guest physical address. The
* pagefault_info_t structure will be filled in if
- * provided, and a page fault exception is
- * automatically queued for injection into the
- * current HVM VCPU.
+ * provided.
*/
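
As a caller-side illustration of the contract described above (the pagefault_info_t is merely filled in; injection is no longer done behind the emulator's back), here is a minimal sketch mirroring the vvmx-style call sites in this patch. The wrapper name copy_linear_example() and its mapping of every failure to X86EMUL_EXCEPTION are illustrative assumptions, not part of the patch; real call sites map failures to their own error paths, and the emulator/shadow ones queue the fault with x86_emul_pagefault() against their emulation context rather than calling hvm_inject_page_fault() directly.

    /* Illustrative sketch only -- not a call site from this patch. */
    static int copy_linear_example(void *buf, unsigned long addr,
                                   unsigned int bytes, uint32_t pfec)
    {
        pagefault_info_t pfinfo;
        int rc = hvm_copy_from_guest_linear(buf, addr, bytes, pfec, &pfinfo);

        if ( rc == HVMCOPY_bad_gva_to_gfn )
            /*
             * Raising the fault is now the caller's responsibility;
             * __hvm_copy() only fills in pfinfo.
             */
            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);

        if ( rc != HVMCOPY_okay )
            return X86EMUL_EXCEPTION;

        return X86EMUL_OKAY;
    }
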
typedef struct pagefault_info
{