* @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
- * Returns number of bytes failed to copy (0 == complete success).
+ * Returns an hvm_copy_result code (HVMCOPY_okay == complete success).
+ * *ptodo (if non-NULL) is set to the number of bytes not copied.
*/
-static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
+static enum hvm_copy_result __hvm_copy(
+ void *buf, paddr_t addr, int size, int dir, int virt, int *ptodo)
{
unsigned long gfn, mfn;
char *p;
- int count, todo;
+ int count, todo, rc = HVMCOPY_okay;
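+ /* rc records why the copy stopped; todo counts the bytes still to copy. */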
todo = size;
while ( todo > 0 )
count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
if ( virt )
+ {
gfn = paging_gva_to_gfn(current, addr);
+ if ( gfn == INVALID_GFN )
+ {
+ rc = HVMCOPY_bad_gva_to_gfn;
+ goto out;
+ }
+ }
else
+ {
gfn = addr >> PAGE_SHIFT;
-
+ }
+
mfn = get_mfn_from_gpfn(gfn);
if ( mfn == INVALID_MFN )
- return todo;
+ {
+ rc = HVMCOPY_bad_gfn_to_mfn;
+ goto out;
+ }
p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
todo -= count;
}
- return 0;
+ out:
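+ /* Tell interested callers how many bytes were left uncopied. */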
+ if ( ptodo )
+ *ptodo = todo;
+ return rc;
}
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
+enum hvm_copy_result hvm_copy_to_guest_phys(
+ paddr_t paddr, void *buf, int size)
{
- return __hvm_copy(buf, paddr, size, 1, 0);
+ return __hvm_copy(buf, paddr, size, 1, 0, NULL);
}
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
+enum hvm_copy_result hvm_copy_from_guest_phys(
+ void *buf, paddr_t paddr, int size)
{
- return __hvm_copy(buf, paddr, size, 0, 0);
+ return __hvm_copy(buf, paddr, size, 0, 0, NULL);
}
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
+enum hvm_copy_result hvm_copy_to_guest_virt(
+ unsigned long vaddr, void *buf, int size, int *ptodo)
{
- return __hvm_copy(buf, vaddr, size, 1, 1);
+ return __hvm_copy(buf, vaddr, size, 1, 1, ptodo);
}
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
+enum hvm_copy_result hvm_copy_from_guest_virt(
+ void *buf, unsigned long vaddr, int size, int *ptodo)
{
- return __hvm_copy(buf, vaddr, size, 0, 1);
+ return __hvm_copy(buf, vaddr, size, 0, 1, ptodo);
}
-
/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
void hvm_print_line(struct vcpu *v, const char c)
{
unsigned long addr = pio_opp->addr;
if ( hvm_paging_enabled(current) )
{
- int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
- if ( rv != 0 )
+ int rv, todo;
+ rv = hvm_copy_to_guest_virt(addr, &p->data, p->size, &todo);
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
{
/* Failed on the page-spanning copy. Inject PF into
* the guest for the address where we failed. */
- addr += p->size - rv;
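+ /* todo bytes remain, so the faulting byte is at offset (p->size - todo). */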
+ addr += p->size - todo;
gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
"of a page-spanning PIO: va=%#lx\n", addr);
hvm_inject_exception(TRAP_page_fault,
if (hvm_paging_enabled(current))
{
- int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
- if ( rv != 0 )
+ int rv, todo;
+ rv = hvm_copy_to_guest_virt(addr, &p->data, p->size, &todo);
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
{
/* Failed on the page-spanning copy. Inject PF into
* the guest for the address where we failed. */
- addr += p->size - rv;
+ addr += p->size - todo;
gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
"a page-spanning MMIO: va=%#lx\n", addr);
hvm_inject_exception(TRAP_page_fault,
mmio_opp->addr += hvm_get_segment_base(current, x86_seg_ss);
{
unsigned long addr = mmio_opp->addr;
- int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
- if ( rv != 0 )
+ int rv, todo;
+ rv = hvm_copy_to_guest_virt(addr, &p->data, size, &todo);
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
{
- addr += p->size - rv;
+ addr += p->size - todo;
gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO:"
" va=%#lx\n", addr);
hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
}
}
-int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
+int inst_copy_from_guest(
+ unsigned char *buf, unsigned long guest_eip, int inst_len)
{
if ( inst_len > MAX_INST_LEN || inst_len <= 0 )
return 0;
- if ( hvm_copy_from_guest_virt(buf, guest_eip, inst_len) )
+ if ( hvm_copy_from_guest_virt(buf, guest_eip, inst_len, NULL) )
return 0;
return inst_len;
}
if ( dir == IOREQ_WRITE ) {
if ( hvm_paging_enabled(v) )
{
- int rv = hvm_copy_from_guest_virt(&value, addr, size);
- if ( rv != 0 )
+ int rv, todo;
+ rv = hvm_copy_from_guest_virt(&value, addr, size, &todo);
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
{
/* Failed on the page-spanning copy. Inject PF into
* the guest for the address where we failed */
regs->eip -= inst_len; /* do not advance %eip */
regs->eflags |= X86_EFLAGS_RF; /* RF was set by #PF */
/* Must set CR2 at the failing address */
- addr += size - rv;
+ addr += size - todo;
gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
"page-spanning MMIO: va=%#lx\n", addr);
hvm_inject_exception(TRAP_page_fault, 0, addr);
this. */
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
{
+ int todo;
+
if ( this_cpu(guest_handles_in_xen_space) )
{
memcpy(to, from, len);
return 0;
}
- return hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len);
+ hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len, &todo);
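+ /* As before, return the number of bytes that were not copied. */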
+ return todo;
}
unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
{
+ int todo;
+
if ( this_cpu(guest_handles_in_xen_space) )
{
memcpy(to, from, len);
return 0;
}
- return hvm_copy_from_guest_virt(to, (unsigned long)from, len);
+ hvm_copy_from_guest_virt(to, (unsigned long)from, len, &todo);
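+ /* Likewise: the return value is the count of bytes not copied. */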
+ return todo;
}
/*
{
if ( hvm_paging_enabled(current) )
{
- int rv = hvm_copy_from_guest_virt(&value, addr, size);
- if ( rv != 0 )
+ int rv, todo;
+ rv = hvm_copy_from_guest_virt(&value, addr, size, &todo);
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
{
/* Failed on the page-spanning copy. Inject PF into
* the guest for the address where we failed. */
- addr += size - rv;
+ addr += size - todo;
gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
"of a page-spanning PIO: va=%#lx\n", addr);
svm_hvm_inject_exception(TRAP_page_fault, 0, addr);
offset = ( addr_size == 4 ) ? offset : ( offset & 0xFFFF );
addr = hvm_get_segment_base(v, seg);
addr += offset;
- hvm_copy_to_guest_virt(addr,&value,2);
+ hvm_copy_to_guest_virt(addr, &value, 2, NULL);
}
else
{
}
__update_guest_eip(vmcb, inst_len);
-
+
return result;
}
{
if ( hvm_paging_enabled(current) )
{
- int rv = hvm_copy_from_guest_virt(&value, addr, size);
- if ( rv != 0 )
+ int rv, todo;
+ rv = hvm_copy_from_guest_virt(&value, addr, size, &todo);
+ if ( rv == HVMCOPY_bad_gva_to_gfn )
{
/* Failed on the page-spanning copy. Inject PF into
* the guest for the address where we failed. */
- addr += size - rv;
+ addr += size - todo;
gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
"of a page-spanning PIO: va=%#lx\n", addr);
vmx_inject_exception(TRAP_page_fault, 0, addr);
struct sh_emulate_ctxt *sh_ctxt)
{
unsigned long addr;
- int rc, errcode;
+ int rc, errcode, todo;
rc = hvm_translate_linear_addr(
seg, offset, bytes, access_type, sh_ctxt, &addr);
// It entirely ignores the permissions in the page tables.
// In this case, that is only a user vs supervisor access check.
//
- if ( (rc = hvm_copy_from_guest_virt(val, addr, bytes)) == 0 )
+ rc = hvm_copy_from_guest_virt(val, addr, bytes, &todo);
+ switch ( rc )
+ {
+ case HVMCOPY_okay:
return X86EMUL_OKAY;
+ case HVMCOPY_bad_gva_to_gfn:
+ break;
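+ /* A bad GFN->MFN translation cannot be fixed by injecting a fault. */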
+ default:
+ return X86EMUL_UNHANDLEABLE;
+ }
- /* If we got here, there was nothing mapped here, or a bad GFN
- * was mapped here. This should never happen: we're here because
+ /* If we got here, there was no valid mapping for the virtual
+ * address. This should never happen: we're here because
errcode = ring_3(sh_ctxt->ctxt.regs) ? PFEC_user_mode : 0;
if ( access_type == hvm_access_insn_fetch )
errcode |= PFEC_insn_fetch;
- hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
+ hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - todo);
return X86EMUL_EXCEPTION;
}
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
!hvm_copy_from_guest_virt(
- sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+ sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), NULL))
? sizeof(sh_ctxt->insn_buf) : 0;
return &hvm_shadow_emulator_ops;
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
!hvm_copy_from_guest_virt(
- sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+ sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), NULL))
? sizeof(sh_ctxt->insn_buf) : 0;
sh_ctxt->insn_buf_eip = regs->eip;
}
/* Check that the user is allowed to perform this write.
* Returns a mapped pointer to write to, and the mfn it's on,
* or NULL for error. */
-static inline void * emulate_map_dest(struct vcpu *v,
- unsigned long vaddr,
- struct sh_emulate_ctxt *sh_ctxt,
- mfn_t *mfnp)
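+/* Distinguishable failure values for emulate_map_dest(); never valid mappings. */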
+#define MAPPING_UNHANDLEABLE ((void *)0)
+#define MAPPING_EXCEPTION ((void *)1)
+#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 1)
+static inline void *emulate_map_dest(struct vcpu *v,
+ unsigned long vaddr,
+ struct sh_emulate_ctxt *sh_ctxt,
+ mfn_t *mfnp)
{
walk_t gw;
u32 flags, errcode;
/* We don't emulate user-mode writes to page tables */
if ( ring_3(sh_ctxt->ctxt.regs) )
- return NULL;
+ return MAPPING_UNHANDLEABLE;
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/* Try the virtual TLB first */
return sh_map_domain_page(mfn) + (vaddr & ~PAGE_MASK);
}
else
- return NULL;
+ return MAPPING_UNHANDLEABLE;
page_fault:
if ( is_hvm_vcpu(v) )
hvm_inject_exception(TRAP_page_fault, errcode, vaddr);
else
propagate_page_fault(vaddr, errcode);
- return NULL;
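+ /* A fault has been raised for the guest, so report an exception. */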
+ return MAPPING_EXCEPTION;
}
static int safe_not_to_verify_write(mfn_t gmfn, void *dst, void *src,
ASSERT(shadow_locked_by_me(v->domain));
ASSERT(((vaddr & ~PAGE_MASK) + bytes) <= PAGE_SIZE);
- if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
- return X86EMUL_EXCEPTION;
+ addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+ if ( emulate_map_dest_failed(addr) )
+ return ((addr == MAPPING_EXCEPTION) ?
+ X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
skip = safe_not_to_verify_write(mfn, addr, src, bytes);
memcpy(addr, src, bytes);
if ( vaddr & (bytes-1) )
return X86EMUL_UNHANDLEABLE;
- if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
- return X86EMUL_EXCEPTION;
+ addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+ if ( emulate_map_dest_failed(addr) )
+ return ((addr == MAPPING_EXCEPTION) ?
+ X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
skip = safe_not_to_verify_write(mfn, &new, &old, bytes);
if ( vaddr & 7 )
return X86EMUL_UNHANDLEABLE;
- if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
- return X86EMUL_EXCEPTION;
+ addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+ if ( emulate_map_dest_failed(addr) )
+ return ((addr == MAPPING_EXCEPTION) ?
+ X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
old = (((u64) old_hi) << 32) | (u64) old_lo;
new = (((u64) new_hi) << 32) | (u64) new_lo;
void hvm_enable(struct hvm_function_table *);
void hvm_disable(void);
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
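+/* Result codes for the HVM guest-memory copy routines declared below. */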
+enum hvm_copy_result {
+ HVMCOPY_okay = 0,
+ HVMCOPY_bad_gva_to_gfn,
+ HVMCOPY_bad_gfn_to_mfn
+};
+
+/*
+ * Copy to/from a guest physical address.
+ * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
+ * address range does not map entirely onto ordinary machine memory.
+ */
+enum hvm_copy_result hvm_copy_to_guest_phys(
+ paddr_t paddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_phys(
+ void *buf, paddr_t paddr, int size);
+
+/*
+ * Copy to/from a guest virtual address.
+ * Returns:
+ * HVMCOPY_okay: Copy was entirely successful.
+ * HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
+ * ordinary machine memory.
+ * HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
+ * mapping to a guest physical address.
+ *
+ * In all cases, *ptodo (if non-NULL) is set to the number of bytes that
+ * were not copied (0 on complete success).
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt(
+ unsigned long vaddr, void *buf, int size, int *ptodo);
+enum hvm_copy_result hvm_copy_from_guest_virt(
+ void *buf, unsigned long vaddr, int size, int *ptodo);
void hvm_print_line(struct vcpu *v, const char c);
void hlt_timer_fn(void *data);