ia64/linux-2.6.18-xen.hg: arch/v850/lib/memcpy.c @ 854:950b9eb27661

/*
 * arch/v850/lib/memcpy.c -- Memory copying
 *
 * Copyright (C) 2001,02  NEC Corporation
 * Copyright (C) 2001,02  Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#include <linux/types.h>
#include <asm/string.h>

#define CHUNK_SIZE		32 /* bytes */
#define CHUNK_ALIGNED(addr)	(((unsigned long)addr & 0x3) == 0)

/* Note that this macro uses 8 call-clobbered registers (not including
   R1), which are few enough so that the following functions don't need
   to spill anything to memory.  It also uses R1, which is nominally
   reserved for the assembler, but here it should be OK.  */
#define COPY_CHUNK(src, dst)			\
   asm ("mov %0, ep;"				\
	"sld.w 0[ep], r1; sld.w 4[ep], r12;"	\
	"sld.w 8[ep], r13; sld.w 12[ep], r14;"	\
	"sld.w 16[ep], r15; sld.w 20[ep], r17;"	\
	"sld.w 24[ep], r18; sld.w 28[ep], r19;"	\
	"mov %1, ep;"				\
	"sst.w r1, 0[ep]; sst.w r12, 4[ep];"	\
	"sst.w r13, 8[ep]; sst.w r14, 12[ep];"	\
	"sst.w r15, 16[ep]; sst.w r17, 20[ep];"	\
	"sst.w r18, 24[ep]; sst.w r19, 28[ep]"	\
	:: "r" (src), "r" (dst)			\
	: "r1", "r12", "r13", "r14", "r15",	\
	  "r17", "r18", "r19", "ep", "memory");
void *memcpy (void *dst, const void *src, __kernel_size_t size)
{
	char *_dst = dst;
	const char *_src = src;

	if (size >= CHUNK_SIZE && CHUNK_ALIGNED(_src) && CHUNK_ALIGNED(_dst)) {
		/* Copy large blocks efficiently.  */
		unsigned count;
		for (count = size / CHUNK_SIZE; count; count--) {
			COPY_CHUNK (_src, _dst);
			_src += CHUNK_SIZE;
			_dst += CHUNK_SIZE;
		}
		size %= CHUNK_SIZE;
	}

	if (size > 0)
		do
			*_dst++ = *_src++;
		while (--size);

	return dst;
}
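
/* A hypothetical self-test sketch (not part of the original file),
   assuming the static buffers end up word-aligned (typical, though
   not guaranteed by the C standard): since 70 >= CHUNK_SIZE, the copy
   takes the fast path for 70 / 32 == 2 chunks (64 bytes) and the byte
   loop for the remaining 70 % 32 == 6 bytes.  */
#if 0
static int memcpy_selftest (void)
{
	static const char src[70] = "the quick brown fox jumps over the lazy dog";
	static char dst[70];
	int i;

	memcpy (dst, src, sizeof src);
	for (i = 0; i < (int)sizeof src; i++)
		if (dst[i] != src[i])
			return 0;	/* mismatch */
	return 1;			/* copy verified */
}
#endif
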
void *memmove (void *dst, const void *src, __kernel_size_t size)
{
	if ((unsigned long)dst < (unsigned long)src
	    || (unsigned long)src + size < (unsigned long)dst)
		return memcpy (dst, src, size);
	else {
		char *_dst = dst + size;
		const char *_src = src + size;

		if (size >= CHUNK_SIZE
		    && CHUNK_ALIGNED (_src) && CHUNK_ALIGNED (_dst))
		{
			/* Copy large blocks efficiently.  */
			unsigned count;
			for (count = size / CHUNK_SIZE; count; count--) {
				_src -= CHUNK_SIZE;
				_dst -= CHUNK_SIZE;
				COPY_CHUNK (_src, _dst);
			}
			size %= CHUNK_SIZE;
		}

		if (size > 0)
			do
				*--_dst = *--_src;
			while (--size);

		return _dst;
	}
}
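
/* A hypothetical illustration (not part of the original file) of why
   the overlap test above matters: here dst > src and the regions
   overlap, so memmove copies backwards; a naive forward byte copy
   would overwrite source bytes before reading them.  */
#if 0
static void memmove_overlap_demo (void)
{
	static char buf[16] = "abcdefgh";

	/* Shift the 8 bytes up by 4: buf becomes "abcdabcdefgh".  */
	memmove (buf + 4, buf, 8);
}
#endif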