ia64/xen-unstable

changeset 9048:0cb40b7f7840

xen/ia64 uaccess is still Linux-style, with declarations that differ
from common Xen code. It's pointless to patch the original file with
many #ifdef XEN blocks, so instead copy that file to create a new
xen-specific uaccess file, with the unused lines removed. It may need
further rework later.

Without this change, compilation breaks on xen-unstable.
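
As a usage sketch (a hypothetical caller, not part of this patch): with
the new xen-specific header, access_ok() takes only (addr, size) and
reduces to the IS_VMM_ADDRESS() region check, and copy_from_user()
returns the number of bytes left uncopied. For example (the names
fetch_guest_arg and struct hyp_arg are made up for this sketch):

    /* Hypothetical helper illustrating the new interface. */
    #include <asm/uaccess.h>

    struct hyp_arg {
        unsigned long op;
        unsigned long val;
    };

    static int fetch_guest_arg(void __user *uaddr, struct hyp_arg *arg)
    {
        /* New-style access_ok(): no VERIFY_* type, no segment. */
        if (!access_ok(uaddr, sizeof(*arg)))
            return -EFAULT;

        /* copy_from_user() returns the number of bytes NOT copied. */
        if (copy_from_user(arg, uaddr, sizeof(*arg)) != 0)
            return -EFAULT;

        return 0;
    }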

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Feb 27 16:22:02 2006 +0100 (2006-02-27)
parents cc9ed0eea55a
children 38d9f4cbbc1e
files xen/include/asm-ia64/config.h xen/include/asm-ia64/linux-xen/asm/README.origin xen/include/asm-ia64/uaccess.h
line diff
     1.1 --- a/xen/include/asm-ia64/config.h	Mon Feb 27 16:20:23 2006 +0100
     1.2 +++ b/xen/include/asm-ia64/config.h	Mon Feb 27 16:22:02 2006 +0100
     1.3 @@ -195,11 +195,6 @@ void sort_main_extable(void);
     1.4  
     1.5  #define find_first_set_bit(x)	(ffs(x)-1)	// FIXME: Is this right???
     1.6  
     1.7 -// from include/asm-x86/*/uaccess.h
     1.8 -#define array_access_ok(addr,count,size)			\
     1.9 -    (likely(sizeof(count) <= 4) /* disallow 64-bit counts */ &&  \
    1.10 -     access_ok(type,addr,count*size))
    1.11 -
    1.12  // see drivers/char/console.c
    1.13  #ifndef VALIDATE_VT
    1.14  #define	OPT_CONSOLE_STR "com1"
    1.15 @@ -306,7 +301,6 @@ extern int ht_per_core;
    1.16  #define raw_smp_processor_id()	0
    1.17  #endif
    1.18  
    1.19 -
    1.20  #ifndef __ASSEMBLY__
    1.21  #include <linux/linkage.h>
    1.22  #endif
     2.1 --- a/xen/include/asm-ia64/linux-xen/asm/README.origin	Mon Feb 27 16:20:23 2006 +0100
     2.2 +++ b/xen/include/asm-ia64/linux-xen/asm/README.origin	Mon Feb 27 16:22:02 2006 +0100
     2.3 @@ -20,4 +20,3 @@ spinlock.h		-> linux/include/asm-ia64/sp
     2.4  system.h		-> linux/include/asm-ia64/system.h
     2.5  tlbflush.h		-> linux/include/asm-ia64/tlbflush.h
     2.6  types.h			-> linux/include/asm-ia64/types.h
     2.7 -uaccess.h		-> linux/include/asm-ia64/uaccess.h
     3.1 --- a/xen/include/asm-ia64/linux-xen/asm/uaccess.h	Mon Feb 27 16:20:23 2006 +0100
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,415 +0,0 @@
     3.4 -#ifndef _ASM_IA64_UACCESS_H
     3.5 -#define _ASM_IA64_UACCESS_H
     3.6 -
     3.7 -/*
     3.8 - * This file defines various macros to transfer memory areas across
     3.9 - * the user/kernel boundary.  This needs to be done carefully because
    3.10 - * this code is executed in kernel mode and uses user-specified
    3.11 - * addresses.  Thus, we need to be careful not to let the user to
    3.12 - * trick us into accessing kernel memory that would normally be
    3.13 - * inaccessible.  This code is also fairly performance sensitive,
    3.14 - * so we want to spend as little time doing safety checks as
    3.15 - * possible.
    3.16 - *
    3.17 - * To make matters a bit more interesting, these macros sometimes also
    3.18 - * called from within the kernel itself, in which case the address
    3.19 - * validity check must be skipped.  The get_fs() macro tells us what
    3.20 - * to do: if get_fs()==USER_DS, checking is performed, if
    3.21 - * get_fs()==KERNEL_DS, checking is bypassed.
    3.22 - *
    3.23 - * Note that even if the memory area specified by the user is in a
    3.24 - * valid address range, it is still possible that we'll get a page
    3.25 - * fault while accessing it.  This is handled by filling out an
    3.26 - * exception handler fixup entry for each instruction that has the
    3.27 - * potential to fault.  When such a fault occurs, the page fault
    3.28 - * handler checks to see whether the faulting instruction has a fixup
    3.29 - * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
    3.30 - * then resumes execution at the continuation point.
    3.31 - *
    3.32 - * Based on <asm-alpha/uaccess.h>.
    3.33 - *
    3.34 - * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
    3.35 - *	David Mosberger-Tang <davidm@hpl.hp.com>
    3.36 - */
    3.37 -
    3.38 -#include <linux/compiler.h>
    3.39 -#include <linux/errno.h>
    3.40 -#include <linux/sched.h>
    3.41 -#include <linux/page-flags.h>
    3.42 -#include <linux/mm.h>
    3.43 -
    3.44 -#include <asm/intrinsics.h>
    3.45 -#include <asm/pgtable.h>
    3.46 -#include <asm/io.h>
    3.47 -
    3.48 -/*
    3.49 - * For historical reasons, the following macros are grossly misnamed:
    3.50 - */
    3.51 -#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
    3.52 -#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */
    3.53 -
    3.54 -#define VERIFY_READ	0
    3.55 -#define VERIFY_WRITE	1
    3.56 -
    3.57 -#define get_ds()  (KERNEL_DS)
    3.58 -#define get_fs()  (current_thread_info()->addr_limit)
    3.59 -#define set_fs(x) (current_thread_info()->addr_limit = (x))
    3.60 -
    3.61 -#define segment_eq(a, b)	((a).seg == (b).seg)
    3.62 -
    3.63 -/*
    3.64 - * When accessing user memory, we need to make sure the entire area really is in
    3.65 - * user-level space.  In order to do this efficiently, we make sure that the page at
    3.66 - * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
    3.67 - * point inside the virtually mapped linear page table.
    3.68 - */
    3.69 -#ifdef XEN
    3.70 -#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
    3.71 -#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
    3.72 -#else
    3.73 -#define __access_ok(addr, size, segment)						\
    3.74 -({											\
    3.75 -	__chk_user_ptr(addr);								\
    3.76 -	(likely((unsigned long) (addr) <= (segment).seg)				\
    3.77 -	 && ((segment).seg == KERNEL_DS.seg						\
    3.78 -	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
    3.79 -})
    3.80 -#endif
    3.81 -#define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
    3.82 -
    3.83 -/* this function will go away soon - use access_ok() instead */
    3.84 -static inline int __deprecated
    3.85 -verify_area (int type, const void __user *addr, unsigned long size)
    3.86 -{
    3.87 -	return access_ok(type, addr, size) ? 0 : -EFAULT;
    3.88 -}
    3.89 -
    3.90 -/*
    3.91 - * These are the main single-value transfer routines.  They automatically
    3.92 - * use the right size if we just have the right pointer type.
    3.93 - *
    3.94 - * Careful to not
    3.95 - * (a) re-use the arguments for side effects (sizeof/typeof is ok)
    3.96 - * (b) require any knowledge of processes at this stage
    3.97 - */
    3.98 -#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
    3.99 -#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
   3.100 -
   3.101 -/*
   3.102 - * The "__xxx" versions do not do address space checking, useful when
   3.103 - * doing multiple accesses to the same area (the programmer has to do the
   3.104 - * checks by hand with "access_ok()")
   3.105 - */
   3.106 -#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
   3.107 -#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
   3.108 -
   3.109 -extern long __put_user_unaligned_unknown (void);
   3.110 -
   3.111 -#define __put_user_unaligned(x, ptr)								\
   3.112 -({												\
   3.113 -	long __ret;										\
   3.114 -	switch (sizeof(*(ptr))) {								\
   3.115 -		case 1: __ret = __put_user((x), (ptr)); break;					\
   3.116 -		case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))				\
   3.117 -			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
   3.118 -		case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))				\
   3.119 -			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
   3.120 -		case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))				\
   3.121 -			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
   3.122 -		default: __ret = __put_user_unaligned_unknown();				\
   3.123 -	}											\
   3.124 -	__ret;											\
   3.125 -})
   3.126 -
   3.127 -extern long __get_user_unaligned_unknown (void);
   3.128 -
   3.129 -#define __get_user_unaligned(x, ptr)								\
   3.130 -({												\
   3.131 -	long __ret;										\
   3.132 -	switch (sizeof(*(ptr))) {								\
   3.133 -		case 1: __ret = __get_user((x), (ptr)); break;					\
   3.134 -		case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))				\
   3.135 -			| (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
   3.136 -		case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))				\
   3.137 -			| (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
   3.138 -		case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))				\
   3.139 -			| (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
   3.140 -		default: __ret = __get_user_unaligned_unknown();				\
   3.141 -	}											\
   3.142 -	__ret;											\
   3.143 -})
   3.144 -
   3.145 -#ifdef ASM_SUPPORTED
   3.146 -  struct __large_struct { unsigned long buf[100]; };
   3.147 -# define __m(x) (*(struct __large_struct __user *)(x))
   3.148 -
   3.149 -/* We need to declare the __ex_table section before we can use it in .xdata.  */
   3.150 -asm (".section \"__ex_table\", \"a\"\n\t.previous");
   3.151 -
   3.152 -# define __get_user_size(val, addr, n, err)							\
   3.153 -do {												\
   3.154 -	register long __gu_r8 asm ("r8") = 0;							\
   3.155 -	register long __gu_r9 asm ("r9");							\
   3.156 -	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
   3.157 -	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"						\
   3.158 -	     "[1:]"										\
   3.159 -	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));			\
   3.160 -	(err) = __gu_r8;									\
   3.161 -	(val) = __gu_r9;									\
   3.162 -} while (0)
   3.163 -
   3.164 -/*
   3.165 - * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it.  This
   3.166 - * is because they do not write to any memory gcc knows about, so there are no aliasing
   3.167 - * issues.
   3.168 - */
   3.169 -# define __put_user_size(val, addr, n, err)							\
   3.170 -do {												\
   3.171 -	register long __pu_r8 asm ("r8") = 0;							\
   3.172 -	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
   3.173 -		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"					\
   3.174 -		      "[1:]"									\
   3.175 -		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));		\
   3.176 -	(err) = __pu_r8;									\
   3.177 -} while (0)
   3.178 -
   3.179 -#else /* !ASM_SUPPORTED */
   3.180 -# define RELOC_TYPE	2	/* ip-rel */
   3.181 -# define __get_user_size(val, addr, n, err)				\
   3.182 -do {									\
   3.183 -	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
   3.184 -	(err) = ia64_getreg(_IA64_REG_R8);				\
   3.185 -	(val) = ia64_getreg(_IA64_REG_R9);				\
   3.186 -} while (0)
   3.187 -# define __put_user_size(val, addr, n, err)							\
   3.188 -do {												\
   3.189 -	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val));	\
   3.190 -	(err) = ia64_getreg(_IA64_REG_R8);							\
   3.191 -} while (0)
   3.192 -#endif /* !ASM_SUPPORTED */
   3.193 -
   3.194 -extern void __get_user_unknown (void);
   3.195 -
   3.196 -/*
   3.197 - * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
   3.198 - * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate it while
   3.199 - * using r8/r9.
   3.200 - */
   3.201 -#define __do_get_user(check, x, ptr, size, segment)					\
   3.202 -({											\
   3.203 -	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
   3.204 -	__typeof__ (size) __gu_size = (size);						\
   3.205 -	long __gu_err = -EFAULT, __gu_val = 0;						\
   3.206 -											\
   3.207 -	if (!check || __access_ok(__gu_ptr, size, segment))				\
   3.208 -		switch (__gu_size) {							\
   3.209 -		      case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
   3.210 -		      case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
   3.211 -		      case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
   3.212 -		      case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
   3.213 -		      default: __get_user_unknown(); break;				\
   3.214 -		}									\
   3.215 -	(x) = (__typeof__(*(__gu_ptr))) __gu_val;					\
   3.216 -	__gu_err;									\
   3.217 -})
   3.218 -
   3.219 -#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
   3.220 -#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)
   3.221 -
   3.222 -extern void __put_user_unknown (void);
   3.223 -
   3.224 -/*
   3.225 - * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
   3.226 - * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
   3.227 - */
   3.228 -#define __do_put_user(check, x, ptr, size, segment)					\
   3.229 -({											\
   3.230 -	__typeof__ (x) __pu_x = (x);							\
   3.231 -	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
   3.232 -	__typeof__ (size) __pu_size = (size);						\
   3.233 -	long __pu_err = -EFAULT;							\
   3.234 -											\
   3.235 -	if (!check || __access_ok(__pu_ptr, __pu_size, segment))			\
   3.236 -		switch (__pu_size) {							\
   3.237 -		      case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
   3.238 -		      case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
   3.239 -		      case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;	\
   3.240 -		      case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;	\
   3.241 -		      default: __put_user_unknown(); break;				\
   3.242 -		}									\
   3.243 -	__pu_err;									\
   3.244 -})
   3.245 -
   3.246 -#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
   3.247 -#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)
   3.248 -
   3.249 -/*
   3.250 - * Complex access routines
   3.251 - */
   3.252 -extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
   3.253 -					       unsigned long count);
   3.254 -
   3.255 -static inline unsigned long
   3.256 -__copy_to_user (void __user *to, const void *from, unsigned long count)
   3.257 -{
   3.258 -	return __copy_user(to, (void __user *) from, count);
   3.259 -}
   3.260 -
   3.261 -static inline unsigned long
   3.262 -__copy_from_user (void *to, const void __user *from, unsigned long count)
   3.263 -{
   3.264 -	return __copy_user((void __user *) to, from, count);
   3.265 -}
   3.266 -
   3.267 -#define __copy_to_user_inatomic		__copy_to_user
   3.268 -#define __copy_from_user_inatomic	__copy_from_user
   3.269 -#define copy_to_user(to, from, n)							\
   3.270 -({											\
   3.271 -	void __user *__cu_to = (to);							\
   3.272 -	const void *__cu_from = (from);							\
   3.273 -	long __cu_len = (n);								\
   3.274 -											\
   3.275 -	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
   3.276 -		__cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len);	\
   3.277 -	__cu_len;									\
   3.278 -})
   3.279 -
   3.280 -#define copy_from_user(to, from, n)							\
   3.281 -({											\
   3.282 -	void *__cu_to = (to);								\
   3.283 -	const void __user *__cu_from = (from);						\
   3.284 -	long __cu_len = (n);								\
   3.285 -											\
   3.286 -	__chk_user_ptr(__cu_from);							\
   3.287 -	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
   3.288 -		__cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len);	\
   3.289 -	__cu_len;									\
   3.290 -})
   3.291 -
   3.292 -#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
   3.293 -
   3.294 -static inline unsigned long
   3.295 -copy_in_user (void __user *to, const void __user *from, unsigned long n)
   3.296 -{
   3.297 -	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
   3.298 -		n = __copy_user(to, from, n);
   3.299 -	return n;
   3.300 -}
   3.301 -
   3.302 -extern unsigned long __do_clear_user (void __user *, unsigned long);
   3.303 -
   3.304 -#define __clear_user(to, n)		__do_clear_user(to, n)
   3.305 -
   3.306 -#define clear_user(to, n)					\
   3.307 -({								\
   3.308 -	unsigned long __cu_len = (n);				\
   3.309 -	if (__access_ok(to, __cu_len, get_fs()))		\
   3.310 -		__cu_len = __do_clear_user(to, __cu_len);	\
   3.311 -	__cu_len;						\
   3.312 -})
   3.313 -
   3.314 -
   3.315 -/*
   3.316 - * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
   3.317 - * strlen.
   3.318 - */
   3.319 -extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
   3.320 -
   3.321 -#define strncpy_from_user(to, from, n)					\
   3.322 -({									\
   3.323 -	const char __user * __sfu_from = (from);			\
   3.324 -	long __sfu_ret = -EFAULT;					\
   3.325 -	if (__access_ok(__sfu_from, 0, get_fs()))			\
   3.326 -		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
   3.327 -	__sfu_ret;							\
   3.328 -})
   3.329 -
   3.330 -/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
   3.331 -extern unsigned long __strlen_user (const char __user *);
   3.332 -
   3.333 -#define strlen_user(str)				\
   3.334 -({							\
   3.335 -	const char __user *__su_str = (str);		\
   3.336 -	unsigned long __su_ret = 0;			\
   3.337 -	if (__access_ok(__su_str, 0, get_fs()))		\
   3.338 -		__su_ret = __strlen_user(__su_str);	\
   3.339 -	__su_ret;					\
   3.340 -})
   3.341 -
   3.342 -/*
   3.343 - * Returns: 0 if exception before NUL or reaching the supplied limit
   3.344 - * (N), a value greater than N if the limit would be exceeded, else
   3.345 - * strlen.
   3.346 - */
   3.347 -extern unsigned long __strnlen_user (const char __user *, long);
   3.348 -
   3.349 -#define strnlen_user(str, len)					\
   3.350 -({								\
   3.351 -	const char __user *__su_str = (str);			\
   3.352 -	unsigned long __su_ret = 0;				\
   3.353 -	if (__access_ok(__su_str, 0, get_fs()))			\
   3.354 -		__su_ret = __strnlen_user(__su_str, len);	\
   3.355 -	__su_ret;						\
   3.356 -})
   3.357 -
   3.358 -/* Generic code can't deal with the location-relative format that we use for compactness.  */
   3.359 -#define ARCH_HAS_SORT_EXTABLE
   3.360 -#define ARCH_HAS_SEARCH_EXTABLE
   3.361 -
   3.362 -struct exception_table_entry {
   3.363 -	int addr;	/* location-relative address of insn this fixup is for */
   3.364 -	int cont;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
   3.365 -};
   3.366 -
   3.367 -extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
   3.368 -extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
   3.369 -
   3.370 -static inline int
   3.371 -ia64_done_with_exception (struct pt_regs *regs)
   3.372 -{
   3.373 -	const struct exception_table_entry *e;
   3.374 -	e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
   3.375 -	if (e) {
   3.376 -		ia64_handle_exception(regs, e);
   3.377 -		return 1;
   3.378 -	}
   3.379 -	return 0;
   3.380 -}
   3.381 -
   3.382 -#ifndef XEN
   3.383 -#define ARCH_HAS_TRANSLATE_MEM_PTR	1
   3.384 -static __inline__ char *
   3.385 -xlate_dev_mem_ptr (unsigned long p)
   3.386 -{
   3.387 -	struct page *page;
   3.388 -	char * ptr;
   3.389 -
   3.390 -	page = mfn_to_page(p >> PAGE_SHIFT);
   3.391 -	if (PageUncached(page))
   3.392 -		ptr = (char *)p + __IA64_UNCACHED_OFFSET;
   3.393 -	else
   3.394 -		ptr = __va(p);
   3.395 -
   3.396 -	return ptr;
   3.397 -}
   3.398 -
   3.399 -/*
   3.400 - * Convert a virtual cached kernel memory pointer to an uncached pointer
   3.401 - */
   3.402 -static __inline__ char *
   3.403 -xlate_dev_kmem_ptr (char * p)
   3.404 -{
   3.405 -	struct page *page;
   3.406 -	char * ptr;
   3.407 -
   3.408 -	page = virt_to_page((unsigned long)p >> PAGE_SHIFT);
   3.409 -	if (PageUncached(page))
   3.410 -		ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
   3.411 -	else
   3.412 -		ptr = p;
   3.413 -
   3.414 -	return ptr;
   3.415 -}
   3.416 -#endif
   3.417 -
   3.418 -#endif /* _ASM_IA64_UACCESS_H */
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/include/asm-ia64/uaccess.h	Mon Feb 27 16:22:02 2006 +0100
     4.3 @@ -0,0 +1,285 @@
     4.4 +#ifndef _ASM_IA64_UACCESS_H
     4.5 +#define _ASM_IA64_UACCESS_H
     4.6 +
     4.7 +/*
     4.8 + * This file defines various macros to transfer memory areas across
     4.9 + * the user/kernel boundary.  This needs to be done carefully because
    4.10 + * this code is executed in kernel mode and uses user-specified
    4.11 + * addresses.  Thus, we need to be careful not to let the user to
    4.12 + * trick us into accessing kernel memory that would normally be
    4.13 + * inaccessible.  This code is also fairly performance sensitive,
    4.14 + * so we want to spend as little time doing safety checks as
    4.15 + * possible.
    4.16 + *
    4.17 + * To make matters a bit more interesting, these macros sometimes also
    4.18 + * called from within the kernel itself, in which case the address
    4.19 + * validity check must be skipped.  The get_fs() macro tells us what
    4.20 + * to do: if get_fs()==USER_DS, checking is performed, if
    4.21 + * get_fs()==KERNEL_DS, checking is bypassed.
    4.22 + *
    4.23 + * Note that even if the memory area specified by the user is in a
    4.24 + * valid address range, it is still possible that we'll get a page
    4.25 + * fault while accessing it.  This is handled by filling out an
    4.26 + * exception handler fixup entry for each instruction that has the
    4.27 + * potential to fault.  When such a fault occurs, the page fault
    4.28 + * handler checks to see whether the faulting instruction has a fixup
    4.29 + * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
    4.30 + * then resumes execution at the continuation point.
    4.31 + *
    4.32 + * Based on <asm-alpha/uaccess.h>.
    4.33 + *
    4.34 + * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
    4.35 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    4.36 + */
    4.37 +
    4.38 +#include <linux/compiler.h>
    4.39 +#include <linux/errno.h>
    4.40 +#include <linux/sched.h>
    4.41 +#include <linux/page-flags.h>
    4.42 +#include <linux/mm.h>
    4.43 +
    4.44 +#include <asm/intrinsics.h>
    4.45 +#include <asm/pgtable.h>
    4.46 +#include <asm/io.h>
    4.47 +
    4.48 +#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
    4.49 +#define __access_ok(addr) (!IS_VMM_ADDRESS((unsigned long)(addr)))
    4.50 +#define access_ok(addr, size) (__access_ok(addr))
    4.51 +#define array_access_ok(addr,count,size)( __access_ok(addr))
    4.52 +
    4.53 +/*
    4.54 + * These are the main single-value transfer routines.  They automatically
    4.55 + * use the right size if we just have the right pointer type.
    4.56 + *
    4.57 + * Careful to not
    4.58 + * (a) re-use the arguments for side effects (sizeof/typeof is ok)
    4.59 + * (b) require any knowledge of processes at this stage
    4.60 + */
    4.61 +#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
    4.62 +#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
    4.63 +
    4.64 +/*
    4.65 + * The "__xxx" versions do not do address space checking, useful when
    4.66 + * doing multiple accesses to the same area (the programmer has to do the
    4.67 + * checks by hand with "access_ok()")
    4.68 + */
    4.69 +#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
    4.70 +#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
    4.71 +
    4.72 +extern long __put_user_unaligned_unknown (void);
    4.73 +
    4.74 +#define __put_user_unaligned(x, ptr)								\
    4.75 +({												\
    4.76 +	long __ret;										\
    4.77 +	switch (sizeof(*(ptr))) {								\
    4.78 +		case 1: __ret = __put_user((x), (ptr)); break;					\
    4.79 +		case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))				\
    4.80 +			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
    4.81 +		case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))				\
    4.82 +			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
    4.83 +		case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))				\
    4.84 +			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
    4.85 +		default: __ret = __put_user_unaligned_unknown();				\
    4.86 +	}											\
    4.87 +	__ret;											\
    4.88 +})
    4.89 +
    4.90 +extern long __get_user_unaligned_unknown (void);
    4.91 +
    4.92 +#define __get_user_unaligned(x, ptr)								\
    4.93 +({												\
    4.94 +	long __ret;										\
    4.95 +	switch (sizeof(*(ptr))) {								\
    4.96 +		case 1: __ret = __get_user((x), (ptr)); break;					\
    4.97 +		case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))				\
    4.98 +			| (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
    4.99 +		case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))				\
   4.100 +			| (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
   4.101 +		case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))				\
   4.102 +			| (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
   4.103 +		default: __ret = __get_user_unaligned_unknown();				\
   4.104 +	}											\
   4.105 +	__ret;											\
   4.106 +})
   4.107 +
   4.108 +#ifdef ASM_SUPPORTED
   4.109 +  struct __large_struct { unsigned long buf[100]; };
   4.110 +# define __m(x) (*(struct __large_struct __user *)(x))
   4.111 +
   4.112 +/* We need to declare the __ex_table section before we can use it in .xdata.  */
   4.113 +asm (".section \"__ex_table\", \"a\"\n\t.previous");
   4.114 +
   4.115 +# define __get_user_size(val, addr, n, err)							\
   4.116 +do {												\
   4.117 +	register long __gu_r8 asm ("r8") = 0;							\
   4.118 +	register long __gu_r9 asm ("r9");							\
   4.119 +	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
   4.120 +	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"						\
   4.121 +	     "[1:]"										\
   4.122 +	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));			\
   4.123 +	(err) = __gu_r8;									\
   4.124 +	(val) = __gu_r9;									\
   4.125 +} while (0)
   4.126 +
   4.127 +/*
   4.128 + * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it.  This
   4.129 + * is because they do not write to any memory gcc knows about, so there are no aliasing
   4.130 + * issues.
   4.131 + */
   4.132 +# define __put_user_size(val, addr, n, err)							\
   4.133 +do {												\
   4.134 +	register long __pu_r8 asm ("r8") = 0;							\
   4.135 +	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
   4.136 +		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"					\
   4.137 +		      "[1:]"									\
   4.138 +		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));		\
   4.139 +	(err) = __pu_r8;									\
   4.140 +} while (0)
   4.141 +
   4.142 +#else /* !ASM_SUPPORTED */
   4.143 +# define RELOC_TYPE	2	/* ip-rel */
   4.144 +# define __get_user_size(val, addr, n, err)				\
   4.145 +do {									\
   4.146 +	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
   4.147 +	(err) = ia64_getreg(_IA64_REG_R8);				\
   4.148 +	(val) = ia64_getreg(_IA64_REG_R9);				\
   4.149 +} while (0)
   4.150 +# define __put_user_size(val, addr, n, err)							\
   4.151 +do {												\
   4.152 +	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val));	\
   4.153 +	(err) = ia64_getreg(_IA64_REG_R8);							\
   4.154 +} while (0)
   4.155 +#endif /* !ASM_SUPPORTED */
   4.156 +
   4.157 +extern void __get_user_unknown (void);
   4.158 +
   4.159 +/*
   4.160 + * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
   4.161 + * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate it while
   4.162 + * using r8/r9.
   4.163 + */
   4.164 +#define __do_get_user(check, x, ptr, size, segment)					\
   4.165 +({											\
   4.166 +	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
   4.167 +	__typeof__ (size) __gu_size = (size);						\
   4.168 +	long __gu_err = -EFAULT, __gu_val = 0;						\
   4.169 +											\
   4.170 +	if (!check || __access_ok(__gu_ptr))						\
   4.171 +		switch (__gu_size) {							\
   4.172 +		      case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
   4.173 +		      case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
   4.174 +		      case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
   4.175 +		      case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
   4.176 +		      default: __get_user_unknown(); break;				\
   4.177 +		}									\
   4.178 +	(x) = (__typeof__(*(__gu_ptr))) __gu_val;					\
   4.179 +	__gu_err;									\
   4.180 +})
   4.181 +
   4.182 +#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
   4.183 +#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)
   4.184 +
   4.185 +extern void __put_user_unknown (void);
   4.186 +
   4.187 +/*
   4.188 + * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
   4.189 + * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
   4.190 + */
   4.191 +#define __do_put_user(check, x, ptr, size, segment)					\
   4.192 +({											\
   4.193 +	__typeof__ (x) __pu_x = (x);							\
   4.194 +	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);					\
   4.195 +	__typeof__ (size) __pu_size = (size);						\
   4.196 +	long __pu_err = -EFAULT;							\
   4.197 +											\
   4.198 +	if (!check || __access_ok(__pu_ptr))						\
   4.199 +		switch (__pu_size) {							\
   4.200 +		      case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
   4.201 +		      case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
   4.202 +		      case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;	\
   4.203 +		      case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;	\
   4.204 +		      default: __put_user_unknown(); break;				\
   4.205 +		}									\
   4.206 +	__pu_err;									\
   4.207 +})
   4.208 +
   4.209 +#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
   4.210 +#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)
   4.211 +
   4.212 +/*
   4.213 + * Complex access routines
   4.214 + */
   4.215 +extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
   4.216 +					       unsigned long count);
   4.217 +
   4.218 +static inline unsigned long
   4.219 +__copy_to_user (void __user *to, const void *from, unsigned long count)
   4.220 +{
   4.221 +	return __copy_user(to, (void __user *) from, count);
   4.222 +}
   4.223 +
   4.224 +static inline unsigned long
   4.225 +__copy_from_user (void *to, const void __user *from, unsigned long count)
   4.226 +{
   4.227 +	return __copy_user((void __user *) to, from, count);
   4.228 +}
   4.229 +
   4.230 +#define __copy_to_user_inatomic		__copy_to_user
   4.231 +#define __copy_from_user_inatomic	__copy_from_user
   4.232 +#define copy_to_user(to, from, n)							\
   4.233 +({											\
   4.234 +	void __user *__cu_to = (to);							\
   4.235 +	const void *__cu_from = (from);							\
   4.236 +	long __cu_len = (n);								\
   4.237 +											\
   4.238 +	if (__access_ok(__cu_to))							\
   4.239 +		__cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len);	\
   4.240 +	__cu_len;									\
   4.241 +})
   4.242 +
   4.243 +#define copy_from_user(to, from, n)							\
   4.244 +({											\
   4.245 +	void *__cu_to = (to);								\
   4.246 +	const void __user *__cu_from = (from);						\
   4.247 +	long __cu_len = (n);								\
   4.248 +											\
   4.249 +	__chk_user_ptr(__cu_from);							\
   4.250 +	if (__access_ok(__cu_from))							\
   4.251 +		__cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len);	\
   4.252 +	__cu_len;									\
   4.253 +})
   4.254 +
   4.255 +#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
   4.256 +
   4.257 +static inline unsigned long
   4.258 +copy_in_user (void __user *to, const void __user *from, unsigned long n)
   4.259 +{
   4.260 +	if (likely(access_ok(from, n) && access_ok(to, n)))
   4.261 +		n = __copy_user(to, from, n);
   4.262 +	return n;
   4.263 +}
   4.264 +
   4.265 +#define ARCH_HAS_SORT_EXTABLE
   4.266 +#define ARCH_HAS_SEARCH_EXTABLE
   4.267 +
   4.268 +struct exception_table_entry {
   4.269 +	int addr;	/* location-relative address of insn this fixup is for */
   4.270 +	int cont;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
   4.271 +};
   4.272 +
   4.273 +extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
   4.274 +extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
   4.275 +
   4.276 +static inline int
   4.277 +ia64_done_with_exception (struct pt_regs *regs)
   4.278 +{
   4.279 +	const struct exception_table_entry *e;
   4.280 +	e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
   4.281 +	if (e) {
   4.282 +		ia64_handle_exception(regs, e);
   4.283 +		return 1;
   4.284 +	}
   4.285 +	return 0;
   4.286 +}
   4.287 +
   4.288 +#endif /* _ASM_IA64_UACCESS_H */
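
For reference, the IS_VMM_ADDRESS() test in the new header flags an
address as hypervisor space exactly when bits 60 and 59 of the address
differ. A minimal standalone demonstration of that arithmetic (a
hypothetical test program, not part of this changeset):

    #include <stdio.h>

    #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)

    int main(void)
    {
        /* bits 60,59 = 0,0: passes the check */
        unsigned long guest = 0xE000000000000000UL;
        /* bits 60,59 = 1,0: rejected as VMM space */
        unsigned long vmm = 0xF000000000000000UL;

        printf("%d %d\n", (int)IS_VMM_ADDRESS(guest),
                          (int)IS_VMM_ADDRESS(vmm));
        return 0; /* prints: 0 1 */
    }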