        pfec |= PFEC_implicit;
    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
        pfec |= PFEC_user_mode;
+    if ( access_type == hvm_access_insn_fetch )
+        pfec |= PFEC_insn_fetch;
    rc = hvmemul_virtual_to_linear(
        seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
         (vio->mmio_gla == (addr & PAGE_MASK)) )
        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-    rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
-          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
    switch ( rc )
    {
                                        hvm_access_insn_fetch,
                                        &hvmemul_ctxt->seg_reg[x86_seg_cs],
                                        &addr) &&
-             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
-                                         sizeof(hvmemul_ctxt->insn_buf),
-                                         pfec, NULL) == HVMTRANS_okay) ?
+             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+                                        sizeof(hvmemul_ctxt->insn_buf),
+                                        pfec | PFEC_insn_fetch,
+                                        NULL) == HVMTRANS_okay) ?
            sizeof(hvmemul_ctxt->insn_buf) : 0;
    }
    else
                      PFEC_page_present | pfec, pfinfo);
}
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo)
-{
-    return __hvm_copy(buf, addr, size, current,
-                      HVMCOPY_from_guest | HVMCOPY_linear,
-                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
-}
-
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
{
    int rc;
    if ( opt_hvm_fep )
    {
        const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
-        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].dpl == 3)
-                        ? PFEC_user_mode : 0;
+        uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
+                         ? PFEC_user_mode : 0) | PFEC_insn_fetch;
        unsigned long addr;
        char sig[5]; /* ud2; .ascii "xen" */
        if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
                                        sizeof(sig), hvm_access_insn_fetch,
                                        cs, &addr) &&
-             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
-                                          walk, NULL) == HVMTRANS_okay) &&
+             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
+                                         walk, NULL) == HVMTRANS_okay) &&
             (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
        {
            regs->rip += sizeof(sig);
        (!hvm_translate_virtual_addr(
            x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
            hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_fetch_from_guest_linear(
-             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+         !hvm_copy_from_guest_linear(
+             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+             PFEC_insn_fetch, NULL))
        ? sizeof(sh_ctxt->insn_buf) : 0;
    return &hvm_shadow_emulator_ops;
            (!hvm_translate_virtual_addr(
                x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
                hvm_access_insn_fetch, sh_ctxt, &addr) &&
-             !hvm_fetch_from_guest_linear(
-                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+             !hvm_copy_from_guest_linear(
+                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+                 PFEC_insn_fetch, NULL))
            ? sizeof(sh_ctxt->insn_buf) : 0;
        sh_ctxt->insn_buf_eip = regs->rip;
    }
    if ( rc || !bytes )
        return rc;
-    if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
-    else
-        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
+                                    (access_type == hvm_access_insn_fetch
+                                     ? PFEC_insn_fetch : 0),
+                                    &pfinfo);
    switch ( rc )
    {
enum hvm_translation_result hvm_copy_from_guest_linear(
    void *buf, unsigned long addr, int size, uint32_t pfec,
    pagefault_info_t *pfinfo);
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo);
/*
 * Get a reference on the page under an HVM physical or linear address. If