
xen/include/asm-x86/x86_64/uaccess.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table
and skip the p2m lookup when going from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB,
shadow that, and perform the gfn-to-mfn translations via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 057c4c2991fa
children ebbd0e8c3e72
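
To make the mechanism described in the commit message concrete, here is a minimal, hypothetical C sketch of a pagetable that maps the first 4GB one-to-one using 2MB superpages. The type, flag names, and function name are assumptions local to this note, not Xen's actual shadow or p2m interfaces; the real construction lives in the shadow code changed by this patch.

#include <stdint.h>

/* Illustrative PAE l2 (page-directory) entry flags; the bit positions follow
 * the x86 architecture, but the names are local to this sketch. */
#define SKETCH_PRESENT  (1ULL << 0)
#define SKETCH_RW       (1ULL << 1)
#define SKETCH_PSE      (1ULL << 7)   /* entry maps a 2MB superpage */

/* Four l2 tables of 512 entries, each entry covering 2MB: 4 * 512 * 2MB = 4GB. */
static uint64_t sketch_l2[4][512];

static void sketch_build_identity_4gb(void)
{
    uint64_t frame = 0;  /* index of the 2MB chunk being mapped */
    for (int t = 0; t < 4; t++)
        for (int e = 0; e < 512; e++, frame++)
            sketch_l2[t][e] = (frame << 21) | SKETCH_PRESENT | SKETCH_RW | SKETCH_PSE;
}

Shadowing a table like this lets the normal shadow fault handler run each guest frame number through the p2m, which is the point of the change.
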
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
 * This is also valid for range checks (addr, addr+size). As long as the
 * start address is outside the Xen-reserved area then we will access a
 * non-canonical address (and thus fault) before ever reaching VIRT_START.
 */
#define __addr_ok(addr) \
    (((unsigned long)(addr) < (1UL<<48)) || \
     ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
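
/*
 * Worked example (editorial note, not part of the original header): for a
 * copy starting at addr = 0x00007ffffffff000 with size = 0x2000,
 * __addr_ok(addr) passes because addr < 2^48.  The copy then hits the
 * non-canonical hole at 0x0000800000000000 and faults there, long before it
 * could reach the Xen-reserved area, which is why checking only the start
 * address suffices for (addr, addr+size) ranges.
 */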

#define access_ok(addr, size) (__addr_ok(addr))

#define array_access_ok(addr, count, size) (__addr_ok(addr))

#ifdef CONFIG_COMPAT

#define __compat_addr_ok(addr) \
    ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(current->domain))

#define compat_access_ok(addr, size) \
    __compat_addr_ok((unsigned long)(addr) + ((size) ? (size) - 1 : 0))
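
/*
 * Editorial note: the (count) < (~0U / (size)) guard below rejects requests
 * whose total byte count would overflow a 32-bit value, so the
 * (count) * (size) product handed to compat_access_ok() cannot wrap around.
 */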
#define compat_array_access_ok(addr,count,size) \
    (likely((count) < (~0U / (size))) && \
     compat_access_ok(addr, (count) * (size)))

#endif

#define __put_user_size(x,ptr,size,retval,errret)                       \
do {                                                                     \
    retval = 0;                                                          \
    switch (size) {                                                      \
    case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret); break;     \
    case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret); break;     \
    case 4: __put_user_asm(x,ptr,retval,"l","k","ir",errret); break;     \
    case 8: __put_user_asm(x,ptr,retval,"q","","ir",errret); break;      \
    default: __put_user_bad();                                           \
    }                                                                    \
} while (0)

#define __get_user_size(x,ptr,size,retval,errret)                        \
do {                                                                     \
    retval = 0;                                                          \
    switch (size) {                                                      \
    case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret); break;     \
    case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret); break;     \
    case 4: __get_user_asm(x,ptr,retval,"l","k","=r",errret); break;     \
    case 8: __get_user_asm(x,ptr,retval,"q","","=r",errret); break;      \
    default: __get_user_bad();                                           \
    }                                                                    \
} while (0)

#endif /* __X86_64_UACCESS_H */
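
The two size-dispatch macros above are normally consumed by put_user()/get_user()-style wrappers that pass sizeof(*ptr) as the size argument; the __put_user_asm()/__get_user_asm() helpers they expand to live in the common x86 uaccess header and are not shown here. Below is a minimal sketch of such a wrapper; the name example_put_user and its exact expansion are assumptions for illustration, not Xen's definition.

/* Hypothetical wrapper: dispatch on the size of the destination object and
 * return 0 on success or -EFAULT if the access faulted. */
#define example_put_user(x, ptr)                                        \
({                                                                      \
    long err_;                                                          \
    __typeof__(*(ptr)) val_ = (x);                                      \
    __put_user_size(val_, (ptr), sizeof(*(ptr)), err_, -EFAULT);        \
    err_;                                                               \
})

A caller would test the result, e.g. if ( example_put_user(val, guest_ptr) ) return -EFAULT;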