xen/arch/x86/x86_32/usercopy.c @ 5281:71124f0ea5d4 (ia64/xen-unstable)

bitkeeper revision 1.1645 (429f63fb6JW1K1LYjnjnswN1KOd0Jg)
author  cl349@firebug.cl.cam.ac.uk
date    Thu Jun 02 19:54:35 2005 +0000 (2005-06-02)
parents dfe18db08708 fbb832f71d22

Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/cl349/xen-unstable.bk

/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <asm/uaccess.h>

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
    if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
        return 0;
#endif
    return 1;
}
#define movsl_is_ok(a1,a2,n) \
    __movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
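
/*
 * Illustration of the test above (addresses made up for the example):
 * with movsl_mask.mask == 7, a1 == 0x1003 and a2 == 0x2004 give
 * (a1 ^ a2) & 7 == 7.  The two pointers then differ mod 8, so no
 * amount of copying can bring both to a common alignment, and for
 * sizes of 64 bytes and up the unrolled Intel routines below are
 * preferred to rep movsl.
 */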

/*
 * Zero Userspace
 */

#define __do_clear_user(addr,size) \
do { \
    int __d0; \
    __asm__ __volatile__( \
        "0:     rep; stosl\n" \
        "       movl %2,%0\n" \
        "1:     rep; stosb\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3:     lea 0(%2,%0,4),%0\n" \
        "       jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        "       .align 4\n" \
        "       .long 0b,3b\n" \
        "       .long 1b,2b\n" \
        ".previous" \
        : "=&c"(size), "=&D" (__d0) \
        : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)
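
/*
 * How the fixup above recovers the count: on entry %ecx holds size/4
 * and the rep stosl at 0: counts it down.  If a store in that loop
 * faults, the lea at 3: rebuilds the number of bytes still uncleared
 * as
 *
 *     size = (size & 3) + 4 * remaining_longs;
 *
 * where remaining_longs is what is left in %ecx.  A fault in the
 * trailing rep stosb at 1: needs no arithmetic: %ecx already holds
 * the remaining byte count, so its entry jumps straight to 2:.
 */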

/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
    if (access_ok(to, n))
        __do_clear_user(to, n);
    return n;
}
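
/*
 * Sketch of a caller (u_buf and len are hypothetical names, not
 * defined in this file):
 *
 *     if (clear_user(u_buf, len) != 0)
 *         return -EFAULT;
 *
 * When access_ok() rejects the range, nothing is cleared and the
 * full length is returned.
 */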

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
    __do_clear_user(to, n);
    return n;
}

#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
    int d0, d1;
    __asm__ __volatile__(
        "       .align 2,0x90\n"
        "1:     movl 32(%4), %%eax\n"
        "       cmpl $67, %0\n"
        "       jbe 3f\n"
        "2:     movl 64(%4), %%eax\n"
        "       .align 2,0x90\n"
        "3:     movl 0(%4), %%eax\n"
        "4:     movl 4(%4), %%edx\n"
        "5:     movl %%eax, 0(%3)\n"
        "6:     movl %%edx, 4(%3)\n"
        "7:     movl 8(%4), %%eax\n"
        "8:     movl 12(%4),%%edx\n"
        "9:     movl %%eax, 8(%3)\n"
        "10:    movl %%edx, 12(%3)\n"
        "11:    movl 16(%4), %%eax\n"
        "12:    movl 20(%4), %%edx\n"
        "13:    movl %%eax, 16(%3)\n"
        "14:    movl %%edx, 20(%3)\n"
        "15:    movl 24(%4), %%eax\n"
        "16:    movl 28(%4), %%edx\n"
        "17:    movl %%eax, 24(%3)\n"
        "18:    movl %%edx, 28(%3)\n"
        "19:    movl 32(%4), %%eax\n"
        "20:    movl 36(%4), %%edx\n"
        "21:    movl %%eax, 32(%3)\n"
        "22:    movl %%edx, 36(%3)\n"
        "23:    movl 40(%4), %%eax\n"
        "24:    movl 44(%4), %%edx\n"
        "25:    movl %%eax, 40(%3)\n"
        "26:    movl %%edx, 44(%3)\n"
        "27:    movl 48(%4), %%eax\n"
        "28:    movl 52(%4), %%edx\n"
        "29:    movl %%eax, 48(%3)\n"
        "30:    movl %%edx, 52(%3)\n"
        "31:    movl 56(%4), %%eax\n"
        "32:    movl 60(%4), %%edx\n"
        "33:    movl %%eax, 56(%3)\n"
        "34:    movl %%edx, 60(%3)\n"
        "       addl $-64, %0\n"
        "       addl $64, %4\n"
        "       addl $64, %3\n"
        "       cmpl $63, %0\n"
        "       ja 1b\n"
        "35:    movl %0, %%eax\n"
        "       shrl $2, %0\n"
        "       andl $3, %%eax\n"
        "       cld\n"
        "99:    rep; movsl\n"
        "36:    movl %%eax, %0\n"
        "37:    rep; movsb\n"
        "100:\n"
        ".section .fixup,\"ax\"\n"
        "101:   lea 0(%%eax,%0,4),%0\n"
        "       jmp 100b\n"
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        "       .align 4\n"
        "       .long 1b,100b\n"
        "       .long 2b,100b\n"
        "       .long 3b,100b\n"
        "       .long 4b,100b\n"
        "       .long 5b,100b\n"
        "       .long 6b,100b\n"
        "       .long 7b,100b\n"
        "       .long 8b,100b\n"
        "       .long 9b,100b\n"
        "       .long 10b,100b\n"
        "       .long 11b,100b\n"
        "       .long 12b,100b\n"
        "       .long 13b,100b\n"
        "       .long 14b,100b\n"
        "       .long 15b,100b\n"
        "       .long 16b,100b\n"
        "       .long 17b,100b\n"
        "       .long 18b,100b\n"
        "       .long 19b,100b\n"
        "       .long 20b,100b\n"
        "       .long 21b,100b\n"
        "       .long 22b,100b\n"
        "       .long 23b,100b\n"
        "       .long 24b,100b\n"
        "       .long 25b,100b\n"
        "       .long 26b,100b\n"
        "       .long 27b,100b\n"
        "       .long 28b,100b\n"
        "       .long 29b,100b\n"
        "       .long 30b,100b\n"
        "       .long 31b,100b\n"
        "       .long 32b,100b\n"
        "       .long 33b,100b\n"
        "       .long 34b,100b\n"
        "       .long 35b,100b\n"
        "       .long 36b,100b\n"
        "       .long 37b,100b\n"
        "       .long 99b,101b\n"
        ".previous"
        : "=&c"(size), "=&D" (d0), "=&S" (d1)
        : "1"(to), "2"(from), "0"(size)
        : "eax", "edx", "memory");
    return size;
}
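
/*
 * Shape of the routine above, in outline: the loads at 1: and 2: are
 * read-ahead touches 32 and 64 bytes into the source (the second is
 * skipped for sizes of 67 bytes or less), the body 3:..34: moves one
 * 64-byte block per iteration through %eax/%edx, and the tail at 35:
 * finishes with rep movsl plus rep movsb for the last 0..63 bytes.
 * Every access is listed in __ex_table, so a fault lands at 100:
 * (via the lea at 101: for the rep movsl) with %0 reporting the
 * bytes still outstanding; a partially copied 64-byte block is
 * counted as not copied.
 */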

static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
    int d0, d1;
    __asm__ __volatile__(
        "       .align 2,0x90\n"
        "0:     movl 32(%4), %%eax\n"
        "       cmpl $67, %0\n"
        "       jbe 2f\n"
        "1:     movl 64(%4), %%eax\n"
        "       .align 2,0x90\n"
        "2:     movl 0(%4), %%eax\n"
        "21:    movl 4(%4), %%edx\n"
        "       movl %%eax, 0(%3)\n"
        "       movl %%edx, 4(%3)\n"
        "3:     movl 8(%4), %%eax\n"
        "31:    movl 12(%4),%%edx\n"
        "       movl %%eax, 8(%3)\n"
        "       movl %%edx, 12(%3)\n"
        "4:     movl 16(%4), %%eax\n"
        "41:    movl 20(%4), %%edx\n"
        "       movl %%eax, 16(%3)\n"
        "       movl %%edx, 20(%3)\n"
        "10:    movl 24(%4), %%eax\n"
        "51:    movl 28(%4), %%edx\n"
        "       movl %%eax, 24(%3)\n"
        "       movl %%edx, 28(%3)\n"
        "11:    movl 32(%4), %%eax\n"
        "61:    movl 36(%4), %%edx\n"
        "       movl %%eax, 32(%3)\n"
        "       movl %%edx, 36(%3)\n"
        "12:    movl 40(%4), %%eax\n"
        "71:    movl 44(%4), %%edx\n"
        "       movl %%eax, 40(%3)\n"
        "       movl %%edx, 44(%3)\n"
        "13:    movl 48(%4), %%eax\n"
        "81:    movl 52(%4), %%edx\n"
        "       movl %%eax, 48(%3)\n"
        "       movl %%edx, 52(%3)\n"
        "14:    movl 56(%4), %%eax\n"
        "91:    movl 60(%4), %%edx\n"
        "       movl %%eax, 56(%3)\n"
        "       movl %%edx, 60(%3)\n"
        "       addl $-64, %0\n"
        "       addl $64, %4\n"
        "       addl $64, %3\n"
        "       cmpl $63, %0\n"
        "       ja 0b\n"
        "5:     movl %0, %%eax\n"
        "       shrl $2, %0\n"
        "       andl $3, %%eax\n"
        "       cld\n"
        "6:     rep; movsl\n"
        "       movl %%eax,%0\n"
        "7:     rep; movsb\n"
        "8:\n"
        ".section .fixup,\"ax\"\n"
        "9:     lea 0(%%eax,%0,4),%0\n"
        "16:    pushl %0\n"
        "       pushl %%eax\n"
        "       xorl %%eax,%%eax\n"
        "       rep; stosb\n"
        "       popl %%eax\n"
        "       popl %0\n"
        "       jmp 8b\n"
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        "       .align 4\n"
        "       .long 0b,16b\n"
        "       .long 1b,16b\n"
        "       .long 2b,16b\n"
        "       .long 21b,16b\n"
        "       .long 3b,16b\n"
        "       .long 31b,16b\n"
        "       .long 4b,16b\n"
        "       .long 41b,16b\n"
        "       .long 10b,16b\n"
        "       .long 51b,16b\n"
        "       .long 11b,16b\n"
        "       .long 61b,16b\n"
        "       .long 12b,16b\n"
        "       .long 71b,16b\n"
        "       .long 13b,16b\n"
        "       .long 81b,16b\n"
        "       .long 14b,16b\n"
        "       .long 91b,16b\n"
        "       .long 6b,9b\n"
        "       .long 7b,16b\n"
        ".previous"
        : "=&c"(size), "=&D" (d0), "=&S" (d1)
        : "1"(to), "2"(from), "0"(size)
        : "eax", "edx", "memory");
    return size;
}
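
/*
 * The zeroing variant above differs from __copy_user_intel only in
 * its fixup: after the lea at 9: rebuilds the outstanding count, the
 * code at 16: saves %ecx and %eax, zero-fills the rest of the
 * destination with rep stosb, restores both registers and rejoins
 * the normal exit at 8:.  This implements the "pad with zeroes on a
 * partial read" contract documented at copy_from_user() below.
 */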

#else
/*
 * Leave these declared but undefined.  There should be no references
 * to them.
 */
unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
#define __copy_user(to,from,size) \
do { \
    int __d0, __d1, __d2; \
    __asm__ __volatile__( \
        "       cmp $7,%0\n" \
        "       jbe 1f\n" \
        "       movl %1,%0\n" \
        "       negl %0\n" \
        "       andl $7,%0\n" \
        "       subl %0,%3\n" \
        "4:     rep; movsb\n" \
        "       movl %3,%0\n" \
        "       shrl $2,%0\n" \
        "       andl $3,%3\n" \
        "       .align 2,0x90\n" \
        "0:     rep; movsl\n" \
        "       movl %3,%0\n" \
        "1:     rep; movsb\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "5:     addl %3,%0\n" \
        "       jmp 2b\n" \
        "3:     lea 0(%3,%0,4),%0\n" \
        "       jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        "       .align 4\n" \
        "       .long 4b,5b\n" \
        "       .long 0b,3b\n" \
        "       .long 1b,2b\n" \
        ".previous" \
        : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
        : "3"(size), "0"(size), "1"(to), "2"(from) \
        : "memory"); \
} while (0)
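
/*
 * The prologue of __copy_user, step by step: for copies larger than
 * 7 bytes it computes
 *
 *     prefix = (-(unsigned long)to) & 7;
 *
 * (the negl/andl pair), copies that many bytes with rep movsb so the
 * destination becomes 8-byte aligned, then moves the bulk with rep
 * movsl and a 0..3 byte rep movsb tail.  The fixups at 5: and 3:
 * rebuild the outstanding byte count for a fault in the prefix or
 * the movsl phase respectively.
 */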

#define __copy_user_zeroing(to,from,size) \
do { \
    int __d0, __d1, __d2; \
    __asm__ __volatile__( \
        "       cmp $7,%0\n" \
        "       jbe 1f\n" \
        "       movl %1,%0\n" \
        "       negl %0\n" \
        "       andl $7,%0\n" \
        "       subl %0,%3\n" \
        "4:     rep; movsb\n" \
        "       movl %3,%0\n" \
        "       shrl $2,%0\n" \
        "       andl $3,%3\n" \
        "       .align 2,0x90\n" \
        "0:     rep; movsl\n" \
        "       movl %3,%0\n" \
        "1:     rep; movsb\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "5:     addl %3,%0\n" \
        "       jmp 6f\n" \
        "3:     lea 0(%3,%0,4),%0\n" \
        "6:     pushl %0\n" \
        "       pushl %%eax\n" \
        "       xorl %%eax,%%eax\n" \
        "       rep; stosb\n" \
        "       popl %%eax\n" \
        "       popl %0\n" \
        "       jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        "       .align 4\n" \
        "       .long 4b,5b\n" \
        "       .long 0b,3b\n" \
        "       .long 1b,6b\n" \
        ".previous" \
        : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
        : "3"(size), "0"(size), "1"(to), "2"(from) \
        : "memory"); \
} while (0)
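
/*
 * __copy_user_zeroing is __copy_user plus a zero-fill on the fault
 * path: label 6: saves %ecx and %eax, clears the rest of the
 * destination with rep stosb, restores the registers and rejoins the
 * exit at 2:, so a short read never leaves uninitialised bytes in
 * the kernel buffer.
 */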

unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
{
    if (movsl_is_ok(to, from, n))
        __copy_user(to, from, n);
    else
        n = __copy_user_intel(to, from, n);
    return n;
}

unsigned long
__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
{
    if (movsl_is_ok(to, from, n))
        __copy_user_zeroing(to, from, n);
    else
        n = __copy_user_zeroing_intel(to, from, n);
    return n;
}
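
/*
 * Dispatch summary for the two _ll helpers above: small copies, and
 * copies whose source and destination agree in alignment, go through
 * the generic rep-movs macros; with CONFIG_X86_INTEL_USERCOPY, large
 * misaligned copies take the unrolled Intel routines instead.
 * Without it, movsl_is_ok() is always true and the Intel routines
 * are never referenced, which is why they may stay undefined.
 */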

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
    if (access_ok(to, n))
        n = __copy_to_user(to, from, n);
    return n;
}
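
/*
 * Sketch of a caller (u_info and struct info are hypothetical, not
 * defined in this file):
 *
 *     struct info i = { 0 };
 *     if (copy_to_user(u_info, &i, sizeof(i)) != 0)
 *         return -EFAULT;
 *
 * A nonzero return means a tail of the structure was not written;
 * callers normally treat any nonzero value as a fault.
 */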

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
    if (access_ok(from, n))
        n = __copy_from_user(to, from, n);
    else
        memset(to, 0, n);
    return n;
}
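
/*
 * Sketch of a caller (u_req and struct req are hypothetical, not
 * defined in this file):
 *
 *     struct req r;
 *     if (copy_from_user(&r, u_req, sizeof(r)) != 0)
 *         return -EFAULT;
 *
 * Even on failure r is fully initialised: either the fault path of
 * __copy_user_zeroing pads the copied part with zeroes, or the
 * memset() above clears the whole buffer when access_ok() rejects
 * the source range.
 */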