ia64/xen-unstable

xen/include/asm-ia64/linux-xen/asm/uaccess.h @ 6457:d34925e4144b

Still more cleanup and moving to 2.6.13 base

author          djm@kirby.fc.hp.com
date            Thu Sep 01 11:09:27 2005 -0600
parents         9312a3e8a6f8
children        b2f4823b6ff0
#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary.  This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses.  Thus, we need to be careful not to let the user
 * trick us into accessing kernel memory that would normally be
 * inaccessible.  This code is also fairly performance sensitive,
 * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped.  The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed, if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it.  This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault.  When such a fault occurs, the page fault
 * handler checks to see whether the faulting instruction has a fixup
 * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
 * then resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
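
/*
 * Illustrative sketch (not part of this header): the usual pattern for
 * temporarily lifting the address-limit check so a *_user accessor can be
 * aimed at a kernel buffer.  Everything except get_fs()/set_fs()/KERNEL_DS
 * is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *	int val;
 *	long err;
 *
 *	set_fs(KERNEL_DS);			// bypass the USER_DS range check
 *	err = get_user(val, (int __user *) kptr);
 *	set_fs(old_fs);				// always restore the old limit
 */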

/*
 * When accessing user memory, we need to make sure the entire area really is in
 * user-level space.  In order to do this efficiently, we make sure that the page at
 * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
 * point inside the virtually mapped linear page table.
 */
#ifdef XEN
#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
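
/*
 * IS_VMM_ADDRESS() is true exactly when bits 59 and 60 of the address
 * differ, i.e. (judging by the name) for the address ranges the hypervisor
 * claims for itself; under Xen the size and segment arguments are ignored
 * and any non-VMM address is accepted.
 */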
#else
#define __access_ok(addr, size, segment)						\
({											\
	__chk_user_ptr(addr);								\
	(likely((unsigned long) (addr) <= (segment).seg)				\
	 && ((segment).seg == KERNEL_DS.seg						\
	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
})
#endif
#define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())

/* this function will go away soon - use access_ok() instead */
static inline int __deprecated
verify_area (int type, const void __user *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
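
/*
 * Example usage (illustrative sketch; the function and variables are
 * made up):
 *
 *	int read_flag (int __user *uaddr, int __user *ustatus)
 *	{
 *		int flag;
 *
 *		if (get_user(flag, uaddr))	// nonzero means -EFAULT
 *			return -EFAULT;
 *		return put_user(flag ? 1 : 0, ustatus);
 *	}
 */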

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
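
/*
 * Sketch of the intended pattern (struct and function are hypothetical):
 * validate the whole range once with access_ok(), then use the unchecked
 * accessors for the individual fields.
 *
 *	struct pair { int a; int b; };
 *
 *	int read_pair (struct pair __user *up, struct pair *kp)
 *	{
 *		if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 *			return -EFAULT;
 *		if (__get_user(kp->a, &up->a) || __get_user(kp->b, &up->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */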

extern long __put_user_unaligned_unknown (void);

#define __put_user_unaligned(x, ptr) \
({ \
	long __ret; \
	switch (sizeof(*(ptr))) { \
		case 1: __ret = __put_user((x), (ptr)); break; \
		case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
		case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
		case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
		default: __ret = __put_user_unaligned_unknown(); \
	} \
	__ret; \
})
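
/*
 * For example (hypothetical caller), a 4-byte store through a pointer with
 * only 2-byte alignment is split by the macro above into two naturally
 * aligned u16 stores, low half first:
 *
 *	u32 val = 0x12345678;
 *	u32 __user *p = (u32 __user *) user_addr;	// maybe 2-byte aligned
 *	long err = __put_user_unaligned(val, p);	// two st2 instead of st4
 */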

extern long __get_user_unaligned_unknown (void);

/*
 * Unlike the stores above, the partial loads need local temporaries to
 * store into (a shifted expression is not an lvalue); the halves are then
 * merged into (x).
 */
#define __get_user_unaligned(x, ptr) \
({ \
	long __ret; \
	switch (sizeof(*(ptr))) { \
		case 1: __ret = __get_user((x), (ptr)); break; \
		case 2: { \
			u8 __gu_lo, __gu_hi; \
			__ret = (__get_user(__gu_lo, (u8 __user *)(ptr))) \
				| (__get_user(__gu_hi, ((u8 __user *)(ptr) + 1))); \
			(x) = __gu_lo | ((u16) __gu_hi << 8); break; \
		} \
		case 4: { \
			u16 __gu_lo, __gu_hi; \
			__ret = (__get_user(__gu_lo, (u16 __user *)(ptr))) \
				| (__get_user(__gu_hi, ((u16 __user *)(ptr) + 1))); \
			(x) = __gu_lo | ((u32) __gu_hi << 16); break; \
		} \
		case 8: { \
			u32 __gu_lo, __gu_hi; \
			__ret = (__get_user(__gu_lo, (u32 __user *)(ptr))) \
				| (__get_user(__gu_hi, ((u32 __user *)(ptr) + 1))); \
			(x) = __gu_lo | ((u64) __gu_hi << 32); break; \
		} \
		default: __ret = __get_user_unaligned_unknown(); \
	} \
	__ret; \
})
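
/*
 * Example (hypothetical caller): fetch a u32 that is only guaranteed
 * 2-byte alignment; the macro above merges the two halves.
 *
 *	u32 v;
 *	if (__get_user_unaligned(v, (u32 __user *) unaligned_uptr))
 *		return -EFAULT;
 */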

#ifdef ASM_SUPPORTED
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata.  */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

# define __get_user_size(val, addr, n, err) \
do { \
	register long __gu_r8 asm ("r8") = 0; \
	register long __gu_r9 asm ("r9"); \
	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n" \
	     "[1:]" \
	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8)); \
	(err) = __gu_r8; \
	(val) = __gu_r9; \
} while (0)
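
/*
 * How the fixup above works: the .xdata4 directive emits a self-relative
 * __ex_table entry for the load at local label [1:].  Adding 4 to the
 * continuation field ("1f-.+4") sets bit 2 of the entry, which tells the
 * fault handler to clear r9 (the value register) in addition to writing
 * -EFAULT into r8; see exception_table_entry near the end of this file.
 * The store case below omits the "+4", since a faulting store leaves no
 * value register to sanitize.
 */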

/*
 * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it.  This
 * is because these stores do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
# define __put_user_size(val, addr, n, err) \
do { \
	register long __pu_r8 asm ("r8") = 0; \
	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \
		      "[1:]" \
		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
	(err) = __pu_r8; \
} while (0)

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE	2	/* ip-rel */
# define __get_user_size(val, addr, n, err) \
do { \
	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE); \
	(err) = ia64_getreg(_IA64_REG_R8); \
	(val) = ia64_getreg(_IA64_REG_R9); \
} while (0)
# define __put_user_size(val, addr, n, err) \
do { \
	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \
	(err) = ia64_getreg(_IA64_REG_R8); \
} while (0)
#endif /* !ASM_SUPPORTED */

extern void __get_user_unknown (void);
194 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
195 * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while
196 * using r8/r9.
197 */
#define __do_get_user(check, x, ptr, size, segment) \
({ \
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
	__typeof__ (size) __gu_size = (size); \
	long __gu_err = -EFAULT, __gu_val = 0; \
\
	if (!check || __access_ok(__gu_ptr, size, segment)) \
		switch (__gu_size) { \
			case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
			case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
			case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \
			case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
			default: __get_user_unknown(); break; \
		} \
	(x) = (__typeof__(*(__gu_ptr))) __gu_val; \
	__gu_err; \
})

#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)

extern void __put_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
 * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
 */
#define __do_put_user(check, x, ptr, size, segment) \
({ \
	__typeof__ (x) __pu_x = (x); \
	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
	__typeof__ (size) __pu_size = (size); \
	long __pu_err = -EFAULT; \
\
	if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \
		switch (__pu_size) { \
			case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
			case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
			case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break; \
			case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break; \
			default: __put_user_unknown(); break; \
		} \
	__pu_err; \
})

#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)

/*
 * Complex access routines
 */
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
					       unsigned long count);

static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
	return __copy_user(to, (void __user *) from, count);
}

static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
	return __copy_user((void __user *) to, from, count);
}

#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user
#define copy_to_user(to, from, n) \
({ \
	void __user *__cu_to = (to); \
	const void *__cu_from = (from); \
	long __cu_len = (n); \
\
	if (__access_ok(__cu_to, __cu_len, get_fs())) \
		__cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len); \
	__cu_len; \
})

#define copy_from_user(to, from, n) \
({ \
	void *__cu_to = (to); \
	const void __user *__cu_from = (from); \
	long __cu_len = (n); \
\
	__chk_user_ptr(__cu_from); \
	if (__access_ok(__cu_from, __cu_len, get_fs())) \
		__cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len); \
	__cu_len; \
})
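
/*
 * Typical use (illustrative sketch; struct, function, and do_something()
 * are made up): pull an argument block across the boundary in one call
 * and treat any nonzero return as a failed (possibly partial) copy.
 *
 *	struct req { unsigned long addr; unsigned long len; };
 *
 *	long handle_req (struct req __user *ureq)
 *	{
 *		struct req r;
 *
 *		if (copy_from_user(&r, ureq, sizeof(r)))
 *			return -EFAULT;		// some bytes were not copied
 *		return do_something(r.addr, r.len);
 *	}
 */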

#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))

static inline unsigned long
copy_in_user (void __user *to, const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
		n = __copy_user(to, from, n);
	return n;
}

extern unsigned long __do_clear_user (void __user *, unsigned long);

#define __clear_user(to, n)	__do_clear_user(to, n)

#define clear_user(to, n) \
({ \
	unsigned long __cu_len = (n); \
	if (__access_ok(to, __cu_len, get_fs())) \
		__cu_len = __do_clear_user(to, __cu_len); \
	__cu_len; \
})
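
/*
 * Example (hypothetical): zero the unwritten tail of a user buffer; the
 * macro returns the number of bytes that could NOT be cleared.
 *
 *	if (clear_user(ubuf + done, len - done))
 *		return -EFAULT;
 */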

/*
 * Returns: -EFAULT if exception before terminator, N if the entire buffer is
 * filled, else strlen.
 */
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);

#define strncpy_from_user(to, from, n) \
({ \
	const char __user * __sfu_from = (from); \
	long __sfu_ret = -EFAULT; \
	if (__access_ok(__sfu_from, 0, get_fs())) \
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
	__sfu_ret; \
})
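
/*
 * Example (hypothetical caller).  Note that only the start of the string
 * is range-checked here (size 0); the length limit is enforced by
 * __strncpy_from_user() itself.
 *
 *	char name[64];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;			// -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;		// buffer filled, no NUL seen
 */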

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char __user *);

#define strlen_user(str) \
({ \
	const char __user *__su_str = (str); \
	unsigned long __su_ret = 0; \
	if (__access_ok(__su_str, 0, get_fs())) \
		__su_ret = __strlen_user(__su_str); \
	__su_ret; \
})

/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char __user *, long);

#define strnlen_user(str, len) \
({ \
	const char __user *__su_str = (str); \
	unsigned long __su_ret = 0; \
	if (__access_ok(__su_str, 0, get_fs())) \
		__su_ret = __strnlen_user(__su_str, len); \
	__su_ret; \
})

/* Generic code can't deal with the location-relative format that we use for compactness.  */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

struct exception_table_entry {
	int addr;	/* location-relative address of insn this fixup is for */
	int cont;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
};
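
/*
 * Roughly how a fixup is applied (sketch of what ia64_handle_exception(),
 * declared below, has to do; treat the details as illustrative):
 *
 *	long fix = (u64) &e->cont + e->cont;	// resolve self-relative addr
 *
 *	regs->r8 = -EFAULT;
 *	if (fix & 4)				// bit 2: also clear r9
 *		regs->r9 = 0;
 *	regs->cr_iip = fix & ~0xfUL;		// continuation bundle
 *	ia64_psr(regs)->ri = fix & 0x3;		// slot within the bundle
 */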

extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
extern const struct exception_table_entry *search_exception_tables (unsigned long addr);

static inline int
ia64_done_with_exception (struct pt_regs *regs)
{
	const struct exception_table_entry *e;
	e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
	if (e) {
		ia64_handle_exception(regs, e);
		return 1;
	}
	return 0;
}

#ifndef XEN
#define ARCH_HAS_TRANSLATE_MEM_PTR	1
static __inline__ char *
xlate_dev_mem_ptr (unsigned long p)
{
	struct page *page;
	char *ptr;

	page = pfn_to_page(p >> PAGE_SHIFT);
	if (PageUncached(page))
		ptr = (char *)p + __IA64_UNCACHED_OFFSET;
	else
		ptr = __va(p);

	return ptr;
}

/*
 * Convert a virtual cached kernel memory pointer to an uncached pointer
 */
static __inline__ char *
xlate_dev_kmem_ptr (char *p)
{
	struct page *page;
	char *ptr;

	/* virt_to_page() expects a kernel virtual address, not a frame
	   number, so the pointer must not be shifted by PAGE_SHIFT here */
	page = virt_to_page((unsigned long)p);
	if (PageUncached(page))
		ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
	else
		ptr = p;

	return ptr;
}
#endif

#endif /* _ASM_IA64_UACCESS_H */