pfec |= PFEC_user_mode;
rc = ((access_type == hvm_access_insn_fetch) ?
- hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo) :
- hvm_copy_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo));
+ hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
+ hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
switch ( rc )
{
(hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
pfec |= PFEC_user_mode;
- rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec, &pfinfo);
+ rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
switch ( rc )
{
hvm_access_insn_fetch,
hvmemul_ctxt->ctxt.addr_size,
&addr) &&
- hvm_fetch_from_guest_virt(hvmemul_ctxt->insn_buf, addr,
- sizeof(hvmemul_ctxt->insn_buf),
- pfec, NULL) == HVMCOPY_okay) ?
+ hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+ sizeof(hvmemul_ctxt->insn_buf),
+ pfec, NULL) == HVMCOPY_okay) ?
sizeof(hvmemul_ctxt->insn_buf) : 0;
}
else
goto out;
}
- rc = hvm_copy_from_guest_virt(
+ rc = hvm_copy_from_guest_linear(
&tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
if ( rc != HVMCOPY_okay )
goto out;
hvm_get_segment_register(v, x86_seg_ldtr, &segr);
tss.ldt = segr.sel;
- rc = hvm_copy_to_guest_virt(prev_tr.base + offsetof(typeof(tss), eip),
- &tss.eip,
- offsetof(typeof(tss), trace) -
- offsetof(typeof(tss), eip),
- PFEC_page_present, &pfinfo);
+ rc = hvm_copy_to_guest_linear(prev_tr.base + offsetof(typeof(tss), eip),
+ &tss.eip,
+ offsetof(typeof(tss), trace) -
+ offsetof(typeof(tss), eip),
+ PFEC_page_present, &pfinfo);
if ( rc != HVMCOPY_okay )
goto out;
- rc = hvm_copy_from_guest_virt(
+ rc = hvm_copy_from_guest_linear(
&tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
/*
* Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
regs->eflags |= X86_EFLAGS_NT;
tss.back_link = prev_tr.sel;
- rc = hvm_copy_to_guest_virt(tr.base + offsetof(typeof(tss), back_link),
- &tss.back_link, sizeof(tss.back_link), 0,
- &pfinfo);
+ rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
+ &tss.back_link, sizeof(tss.back_link), 0,
+ &pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
else if ( rc != HVMCOPY_okay )
16 << segr.attr.fields.db,
&linear_addr) )
{
- rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0,
- &pfinfo);
+ rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
+ &pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
else if ( rc != HVMCOPY_okay )
+/*
+ * Flag bits for __hvm_copy(): bit 0 selects direction (from/to guest),
+ * bit 2 selects the address kind (guest-physical vs guest-linear).
+ * NOTE(review): bit 1 is presumably defined in a hunk not shown here —
+ * confirm against the full file before relying on the bit layout.
+ */
#define HVMCOPY_from_guest (0u<<0)
#define HVMCOPY_to_guest (1u<<0)
#define HVMCOPY_phys (0u<<2)
-#define HVMCOPY_virt (1u<<2)
+#define HVMCOPY_linear (1u<<2)
static enum hvm_copy_result __hvm_copy(
void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
pagefault_info_t *pfinfo)
count = min_t(int, PAGE_SIZE - gpa, todo);
- if ( flags & HVMCOPY_virt )
+ if ( flags & HVMCOPY_linear )
{
gfn = paging_gva_to_gfn(curr, addr, &pfec);
if ( gfn == gfn_x(INVALID_GFN) )
HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
}
+/*
+ * Copy @size bytes from @buf into guest memory at linear address @addr.
+ * PFEC_page_present and PFEC_write_access are always OR'd into the
+ * error code used for the page walk; callers supply extra bits (e.g.
+ * PFEC_user_mode) via @pfec.  On a failed translation, fault details
+ * are presumably returned through @pfinfo — see __hvm_copy().
+ */
-enum hvm_copy_result hvm_copy_to_guest_virt(
-    unsigned long vaddr, void *buf, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_to_guest_linear(
+    unsigned long addr, void *buf, int size, uint32_t pfec,
    pagefault_info_t *pfinfo)
{
-    return __hvm_copy(buf, vaddr, size,
-                      HVMCOPY_to_guest | HVMCOPY_virt,
+    return __hvm_copy(buf, addr, size,
+                      HVMCOPY_to_guest | HVMCOPY_linear,
                      PFEC_page_present | PFEC_write_access | pfec, pfinfo);
}
+/*
+ * Copy @size bytes from guest memory at linear address @addr into @buf.
+ * PFEC_page_present is always OR'd into the error code used for the
+ * page walk; callers supply extra bits (e.g. PFEC_user_mode) via @pfec.
+ * On a failed translation, fault details are presumably returned
+ * through @pfinfo — see __hvm_copy().
+ */
-enum hvm_copy_result hvm_copy_from_guest_virt(
-    void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_from_guest_linear(
+    void *buf, unsigned long addr, int size, uint32_t pfec,
    pagefault_info_t *pfinfo)
{
-    return __hvm_copy(buf, vaddr, size,
-                      HVMCOPY_from_guest | HVMCOPY_virt,
+    return __hvm_copy(buf, addr, size,
+                      HVMCOPY_from_guest | HVMCOPY_linear,
                      PFEC_page_present | pfec, pfinfo);
}
+/*
+ * Like hvm_copy_from_guest_linear(), but the page walk is performed as
+ * an instruction fetch: PFEC_insn_fetch is OR'd into the error code in
+ * addition to PFEC_page_present, so NX/SMEP-style permission checks
+ * apply as for execution rather than a data read.
+ */
-enum hvm_copy_result hvm_fetch_from_guest_virt(
-    void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_fetch_from_guest_linear(
+    void *buf, unsigned long addr, int size, uint32_t pfec,
    pagefault_info_t *pfinfo)
{
-    return __hvm_copy(buf, vaddr, size,
-                      HVMCOPY_from_guest | HVMCOPY_virt,
+    return __hvm_copy(buf, addr, size,
+                      HVMCOPY_from_guest | HVMCOPY_linear,
                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
}
return 0;
}
- rc = hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len, 0, NULL);
+ rc = hvm_copy_to_guest_linear((unsigned long)to, (void *)from, len, 0, NULL);
return rc ? len : 0; /* fake a copy_to_user() return code */
}
return 0;
}
- rc = hvm_copy_from_guest_virt(to, (unsigned long)from, len, 0, NULL);
+ rc = hvm_copy_from_guest_linear(to, (unsigned long)from, len, 0, NULL);
return rc ? len : 0; /* fake a copy_from_user() return code */
}
(hvm_long_mode_enabled(cur) &&
cs->attr.fields.l) ? 64 :
cs->attr.fields.db ? 32 : 16, &addr) &&
- (hvm_fetch_from_guest_virt(sig, addr, sizeof(sig),
- walk, NULL) == HVMCOPY_okay) &&
+ (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
+ walk, NULL) == HVMCOPY_okay) &&
(memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
{
regs->eip += sizeof(sig);
return rc;
if ( access_type == hvm_access_insn_fetch )
- rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
+ rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
else
- rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
+ rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
switch ( rc )
{
(!hvm_translate_linear_addr(
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_fetch_from_guest_virt(
+ !hvm_fetch_from_guest_linear(
sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
? sizeof(sh_ctxt->insn_buf) : 0;
(!hvm_translate_linear_addr(
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_fetch_from_guest_virt(
+ !hvm_fetch_from_guest_linear(
sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
? sizeof(sh_ctxt->insn_buf) : 0;
sh_ctxt->insn_buf_eip = regs->eip;
void *buf, paddr_t paddr, int size);
/*
- * Copy to/from a guest virtual address. @pfec should include PFEC_user_mode
+ * Copy to/from a guest linear address. @pfec should include PFEC_user_mode
* if emulating a user-mode access (CPL=3). All other flags in @pfec are
* managed by the called function: it is therefore optional for the caller
* to set them.
int ec;
} pagefault_info_t;
+/* Copy @size bytes from @buf to guest linear address @addr. */
-enum hvm_copy_result hvm_copy_to_guest_virt(
-    unsigned long vaddr, void *buf, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_to_guest_linear(
+    unsigned long addr, void *buf, int size, uint32_t pfec,
    pagefault_info_t *pfinfo);
+/* Copy @size bytes from guest linear address @addr into @buf. */
-enum hvm_copy_result hvm_copy_from_guest_virt(
-    void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_from_guest_linear(
+    void *buf, unsigned long addr, int size, uint32_t pfec,
    pagefault_info_t *pfinfo);
+/* As hvm_copy_from_guest_linear(), but walked as an instruction fetch. */
-enum hvm_copy_result hvm_fetch_from_guest_virt(
-    void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_fetch_from_guest_linear(
+    void *buf, unsigned long addr, int size, uint32_t pfec,
    pagefault_info_t *pfinfo);
#define HVM_HCALL_completed 0 /* hypercall completed - no further action */