ia64/xen-unstable: xen/include/asm-x86/x86_64/uaccess.h @ 17859:08fb9a4489f7

Changeset: 32-on-64: Fix is_compat_arg_xlat_range().
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author:    Keir Fraser <keir.fraser@citrix.com>
Date:      Fri Jun 13 13:48:13 2008 +0100
Parent:    ebbd0e8c3e72
Child:     67a0ffade665
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H
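
/*
 * Per-CPU buffer used to hold hypercall arguments translated from
 * 32-bit ("compat") guests.  is_compat_arg_xlat_range() reports whether
 * [addr, addr + size) lies entirely within that buffer.
 */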
#define COMPAT_ARG_XLAT_VIRT_BASE this_cpu(compat_arg_xlat)
#define COMPAT_ARG_XLAT_SIZE      PAGE_SIZE
DECLARE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
#define is_compat_arg_xlat_range(addr, size) ({                               \
    unsigned long __off;                                                      \
    __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
    (__off <= COMPAT_ARG_XLAT_SIZE) &&                                        \
    ((__off + (unsigned long)(size)) <= COMPAT_ARG_XLAT_SIZE);                \
})

/*
 * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
 * This is also valid for range checks (addr, addr+size). As long as the
 * start address is outside the Xen-reserved area then we will access a
 * non-canonical address (and thus fault) before ever reaching VIRT_START.
 */
#define __addr_ok(addr)                          \
    (((unsigned long)(addr) < (1UL<<48)) ||      \
     ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
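
/*
 * The compat argument translation buffer lives in Xen's own per-CPU data
 * area, which __addr_ok() rejects, so access_ok() accepts an address
 * either when it is guest-accessible or when the whole range targets the
 * translation buffer.
 */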
#define access_ok(addr, size) \
    (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))

#define array_access_ok(addr, count, size) \
    (access_ok(addr, (count)*(size)))
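
/*
 * Checks against the 32-bit compat layout: a compat guest may only access
 * virtual addresses below HYPERVISOR_COMPAT_VIRT_START for its domain.
 */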
#define __compat_addr_ok(addr) \
    ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(current->domain))

#define compat_access_ok(addr, size) \
    __compat_addr_ok((unsigned long)(addr) + ((size) ? (size) - 1 : 0))

#define compat_array_access_ok(addr,count,size) \
    (likely((count) < (~0U / (size))) &&        \
     compat_access_ok(addr, (count) * (size)))
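
/*
 * Size-dispatching store/load helpers: retval is left at 0 on success and
 * set to errret if the access faults.  Callers normally reach these via
 * the generic put_user()/get_user() family of macros rather than invoking
 * them directly.
 */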
#define __put_user_size(x,ptr,size,retval,errret)                       \
do {                                                                    \
    retval = 0;                                                         \
    switch (size) {                                                     \
    case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break;     \
    case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break;     \
    case 4: __put_user_asm(x,ptr,retval,"l","k","ir",errret);break;     \
    case 8: __put_user_asm(x,ptr,retval,"q","","ir",errret);break;      \
    default: __put_user_bad();                                          \
    }                                                                   \
} while (0)

#define __get_user_size(x,ptr,size,retval,errret)                       \
do {                                                                    \
    retval = 0;                                                         \
    switch (size) {                                                     \
    case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break;     \
    case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break;     \
    case 4: __get_user_asm(x,ptr,retval,"l","k","=r",errret);break;     \
    case 8: __get_user_asm(x,ptr,retval,"q","","=r",errret); break;     \
    default: __get_user_bad();                                          \
    }                                                                   \
} while (0)

#endif /* __X86_64_UACCESS_H */
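
As a usage sketch only (not part of the file above), this is roughly how a
caller might combine access_ok() with __get_user_size() to read a 32-bit
value from guest-provided memory. The function name and the -EFAULT error
value are illustrative assumptions; real Xen code normally goes through the
generic get_user()/copy_from_guest() helpers instead:

    static inline int read_guest_u32(uint32_t *val, const uint32_t *guest_ptr)
    {
        int rc;

        /* Whole range must be guest-accessible (or the compat xlat buffer). */
        if ( !access_ok(guest_ptr, sizeof(*guest_ptr)) )
            return -EFAULT;

        /* A fault during the access leaves rc set to the error value. */
        __get_user_size(*val, guest_ptr, sizeof(*guest_ptr), rc, -EFAULT);
        return rc;
    }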