unsigned int
gdb_arch_copy_from_user(void *dest, const void *src, unsigned len)
{
- return __copy_from_user(dest, src, len);
+ return copy_from_unsafe(dest, src, len);
}
unsigned int
gdb_arch_copy_to_user(void *dest, const void *src, unsigned len)
{
- return __copy_to_user(dest, src, len);
+ return copy_to_unsafe(dest, src, len);
}
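/*
 * Illustrative caller sketch (hypothetical helper, not part of this patch):
 * the "unsafe" variants perform no access_ok() range check and simply report
 * how many bytes could not be copied, so a zero return means complete success.
 */
static bool gdb_read_ok(void *dest, const void *src, unsigned len)
{
    return copy_from_unsafe(dest, src, len) == 0;
}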
void
{
shadow_l2e_t sl2e;
mfn_t gl1mfn;
- if ( (__copy_from_user(&sl2e,
+ if ( (copy_from_unsafe(&sl2e,
(sh_linear_l2_table(v)
+ shadow_l2_linear_offset(va)),
sizeof(sl2e)) != 0)
#endif /* SHOPT_OUT_OF_SYNC */
/* The only reasons for reserved bits to be set in shadow entries
* are the two "magic" shadow_l1e entries. */
- if ( likely((__copy_from_user(&sl1e,
+ if ( likely((copy_from_unsafe(&sl1e,
(sh_linear_l1_table(v)
+ shadow_l1_linear_offset(va)),
sizeof(sl1e)) == 0)
sh_linear_l4_table(v)[shadow_l4_linear_offset(linear)])
& _PAGE_PRESENT) )
return false;
- /* This must still be a copy-from-user because we don't have the
+ /* This must still be a copy-from-unsafe because we don't have the
* paging lock, and the higher-level shadows might disappear
* under our feet. */
- if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
+ if ( copy_from_unsafe(&sl3e, (sh_linear_l3_table(v)
+ shadow_l3_linear_offset(linear)),
sizeof (sl3e)) != 0 )
{
return false;
#endif
- /* This must still be a copy-from-user because we don't have the shadow
+ /* This must still be a copy-from-unsafe because we don't have the shadow
* lock, and the higher-level shadows might disappear under our feet. */
- if ( __copy_from_user(&sl2e,
+ if ( copy_from_unsafe(&sl2e,
sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),
sizeof (sl2e)) != 0 )
{
* hold the paging lock yet. Check again with the lock held. */
paging_lock(d);
- /* This must still be a copy-from-user because we didn't
+ /* This must still be a copy-from-unsafe because we didn't
* have the paging lock last time we checked, and the
* higher-level shadows might have disappeared under our
* feet. */
- if ( __copy_from_user(&sl2e,
+ if ( copy_from_unsafe(&sl2e,
sh_linear_l2_table(v)
+ shadow_l2_linear_offset(linear),
sizeof (sl2e)) != 0 )
addr = (uint32_t)addr;
- if ( (rc = __copy_from_user(p_data, (void *)addr, bytes)) )
+ if ( (rc = __copy_from_guest_pv(p_data, (void __user *)addr, bytes)) )
{
/*
* TODO: This should report PFEC_insn_fetch when goc->insn_fetch &&
* cpu_has_nx, but we'd then need a "fetch" variant of
- * __copy_from_user() respecting NX, SMEP, and protection keys.
+ * __copy_from_guest_pv() respecting NX, SMEP, and protection keys.
*/
x86_emul_pagefault(0, addr + bytes - rc, ctxt);
return X86EMUL_EXCEPTION;
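/*
 * Worked example of the fault address computed above (illustrative only):
 * rc is the number of bytes that could NOT be copied, so with addr = 0x1000,
 * bytes = 8 and rc = 3, the first 5 bytes were read successfully and the
 * fault is reported at addr + bytes - rc = 0x1005.
 */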
if ( rc != X86EMUL_OKAY )
return rc;
- if ( (rc = __copy_to_user((void *)addr, &data, bytes_per_rep)) != 0 )
+ if ( (rc = __copy_to_guest_pv((void __user *)addr, &data,
+ bytes_per_rep)) != 0 )
{
x86_emul_pagefault(PFEC_write_access,
addr + bytes_per_rep - rc, ctxt);
if ( rc != X86EMUL_OKAY )
return rc;
- if ( (rc = __copy_from_user(&data, (void *)addr, bytes_per_rep)) != 0 )
+ if ( (rc = __copy_from_guest_pv(&data, (void __user *)addr,
+ bytes_per_rep)) != 0 )
{
x86_emul_pagefault(0, addr + bytes_per_rep - rc, ctxt);
return X86EMUL_EXCEPTION;
if ( rc != X86EMUL_OKAY )
return rc;
- if ( (rc = __copy_from_user(p_data, (void *)addr, bytes)) != 0 )
+ if ( (rc = __copy_from_guest_pv(p_data, (void __user *)addr, bytes)) != 0 )
{
/*
* TODO: This should report PFEC_insn_fetch when goc->insn_fetch &&
* cpu_has_nx, but we'd then need a "fetch" variant of
- * __copy_from_user() respecting NX, SMEP, and protection keys.
+ * __copy_from_guest_pv() respecting NX, SMEP, and protection keys.
*/
x86_emul_pagefault(0, addr + bytes - rc, ctxt);
return X86EMUL_EXCEPTION;
return NULL;
/* Find this l1e and its enclosing l1mfn in the linear map. */
- if ( __copy_from_user(&l2e,
+ if ( copy_from_unsafe(&l2e,
&__linear_l2_table[l2_linear_offset(linear)],
sizeof(l2_pgentry_t)) )
return NULL;
toggle_guest_pt(curr);
if ( unlikely(!__addr_ok(linear)) ||
- __copy_from_user(&l1e,
+ copy_from_unsafe(&l1e,
&__linear_l1_table[l1_linear_offset(linear)],
sizeof(l1_pgentry_t)) )
l1e = l1e_empty();
unsigned long addr = offset;
if ( !__addr_ok(addr) ||
- (rc = __copy_from_user(p_data, (void *)addr, bytes)) )
+ (rc = __copy_from_guest_pv(p_data, (void *)addr, bytes)) )
{
x86_emul_pagefault(0, addr + bytes - rc, ctxt); /* Read fault. */
return X86EMUL_EXCEPTION;
}
if ( !is_active_kernel_text(regs->rip) ||
- __copy_from_user(bug_insn, eip, sizeof(bug_insn)) ||
+ copy_from_unsafe(bug_insn, eip, sizeof(bug_insn)) ||
memcmp(bug_insn, "\xf\xb", sizeof(bug_insn)) )
goto die;
unsigned copy_to_user(void __user *to, const void *from, unsigned n)
{
if ( access_ok(to, n) )
- n = __copy_to_user(to, from, n);
+ n = __copy_to_guest_pv(to, from, n);
return n;
}
unsigned copy_from_user(void *to, const void __user *from, unsigned n)
{
if ( access_ok(from, n) )
- n = __copy_from_user(to, from, n);
+ n = __copy_from_guest_pv(to, from, n);
else
memset(to, 0, n);
return n;
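/*
 * Hypothetical hypercall-style caller (not part of this patch): guest-supplied
 * pointers go through the checked copy_from_user() above, which combines
 * access_ok() with the "less checking" __copy_from_guest_pv().
 */
static int fetch_guest_ulong(unsigned long guest_addr, unsigned long *out)
{
    if ( copy_from_user(out, (const void __user *)guest_addr, sizeof(*out)) )
        return -EFAULT;   /* some bytes were left uncopied */
    return 0;
}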
#define __raw_copy_to_guest(dst, src, len) \
(is_hvm_vcpu(current) ? \
copy_to_user_hvm((dst), (src), (len)) : \
- __copy_to_user((dst), (src), (len)))
+ __copy_to_guest_pv(dst, src, len))
#define __raw_copy_from_guest(dst, src, len) \
(is_hvm_vcpu(current) ? \
copy_from_user_hvm((dst), (src), (len)) : \
- __copy_from_user((dst), (src), (len)))
+ __copy_from_guest_pv(dst, src, len))
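/*
 * Sketch of the dispatch these macros implement (hypothetical helper, not
 * part of this patch): HVM guests need copy_to_user_hvm() to resolve guest
 * virtual addresses through the HVM path, while PV guests can be reached
 * directly with __copy_to_guest_pv().
 */
static unsigned int put_guest_u32(void __user *dst, uint32_t val)
{
    /* Returns the number of bytes that could not be copied. */
    return __raw_copy_to_guest(dst, &val, sizeof(val));
}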
#define __raw_clear_guest(dst, len) \
(is_hvm_vcpu(current) ? \
clear_user_hvm((dst), (len)) : \
#define get_guest_size get_unsafe_size
/**
- * __copy_to_user: - Copy a block of data into user space, with less checking
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
+ * __copy_to_guest_pv: - Copy a block of data into guest space, with less
+ * checking
+ * @to: Destination address, in guest space.
+ * @from: Source address, in hypervisor space.
* @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
- *
- * Copy data from kernel space to user space. Caller must check
+ * Copy data from hypervisor space to guest space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
static always_inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+__copy_to_guest_pv(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
}
return __copy_to_user_ll(to, from, n);
}
+#define copy_to_unsafe __copy_to_guest_pv
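/*
 * Minimal usage sketch (hypothetical, not part of this patch), mirroring the
 * checked copy_to_user() wrapper shown earlier: the caller performs the
 * access_ok() check demanded by the comment above before using the
 * "less checking" variant.
 */
static unsigned long put_guest_block(void __user *to, const void *from,
                                     unsigned long n)
{
    if ( !access_ok(to, n) )
        return n;                           /* nothing copied */
    return __copy_to_guest_pv(to, from, n); /* bytes left uncopied */
}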
/**
- * __copy_from_user: - Copy a block of data from user space, with less checking
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
+ * __copy_from_guest_pv: - Copy a block of data from guest space, with less
+ * checking
+ * @to: Destination address, in hypervisor space.
+ * @from: Source address, in guest space.
* @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
- *
- * Copy data from user space to kernel space. Caller must check
+ * Copy data from guest space to hypervisor space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* data to the requested size using zero bytes.
*/
static always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_guest_pv(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
}
return __copy_from_user_ll(to, from, n);
}
+#define copy_from_unsafe __copy_from_guest_pv
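/*
 * Matching sketch for the read direction (hypothetical, not part of this
 * patch), mirroring copy_from_user() above including its zero-fill of the
 * destination when the range fails access_ok().
 */
static unsigned long get_guest_block(void *to, const void __user *from,
                                     unsigned long n)
{
    if ( !access_ok(from, n) )
    {
        memset(to, 0, n);
        return n;
    }
    return __copy_from_guest_pv(to, from, n);
}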
/*
* The exception table consists of pairs of addresses: the first is the