ia64/xen-unstable

changeset 13452:fe76b80d081a

[XEN] Include byteorder functions from Linux.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Jan 17 12:11:02 2007 +0000 (2007-01-17)
parents 8a397303fe09
children e798c9010e93
files xen/include/asm-powerpc/byteorder.h xen/include/asm-x86/byteorder.h xen/include/xen/byteorder/big_endian.h xen/include/xen/byteorder/generic.h xen/include/xen/byteorder/little_endian.h xen/include/xen/byteorder/swab.h xen/include/xen/config.h xen/include/xen/types.h
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/xen/include/asm-powerpc/byteorder.h	Wed Jan 17 12:11:02 2007 +0000
     1.3 @@ -0,0 +1,80 @@
     1.4 +#ifndef _ASM_POWERPC_BYTEORDER_H
     1.5 +#define _ASM_POWERPC_BYTEORDER_H
     1.6 +
     1.7 +/*
     1.8 + * This program is free software; you can redistribute it and/or
     1.9 + * modify it under the terms of the GNU General Public License
    1.10 + * as published by the Free Software Foundation; either version
    1.11 + * 2 of the License, or (at your option) any later version.
    1.12 + */
    1.13 +
    1.14 +#include <asm/types.h>
    1.15 +#include <xen/compiler.h>
    1.16 +
    1.17 +static inline __u16 ld_le16(const volatile __u16 *addr)
    1.18 +{
    1.19 +    __u16 val;
    1.20 +
    1.21 +    asm volatile ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
    1.22 +    return val;
    1.23 +}
    1.24 +
    1.25 +static inline void st_le16(volatile __u16 *addr, const __u16 val)
    1.26 +{
    1.27 +    asm volatile ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
    1.28 +}
    1.29 +
    1.30 +static inline __u32 ld_le32(const volatile __u32 *addr)
    1.31 +{
    1.32 +    __u32 val;
    1.33 +
    1.34 +    asm volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
    1.35 +    return val;
    1.36 +}
    1.37 +
    1.38 +static inline void st_le32(volatile __u32 *addr, const __u32 val)
    1.39 +{
    1.40 +    asm volatile ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
    1.41 +}
    1.42 +
    1.43 +static inline __attribute_const__ __u16 ___arch__swab16(__u16 value)
    1.44 +{
    1.45 +    __u16 result;
    1.46 +
    1.47 +    asm("rlwimi %0,%1,8,16,23"
    1.48 +        : "=r" (result)
    1.49 +        : "r" (value), "0" (value >> 8));
    1.50 +    return result;
    1.51 +}
    1.52 +
    1.53 +static inline __attribute_const__ __u32 ___arch__swab32(__u32 value)
    1.54 +{
    1.55 +    __u32 result;
    1.56 +
    1.57 +    asm("rlwimi %0,%1,24,16,23\n\t"
    1.58 +        "rlwimi %0,%1,8,8,15\n\t"
    1.59 +        "rlwimi %0,%1,24,0,7"
    1.60 +        : "=r" (result)
    1.61 +        : "r" (value), "0" (value >> 24));
    1.62 +    return result;
    1.63 +}
    1.64 +
    1.65 +#define __arch__swab16(x) ___arch__swab16(x)
    1.66 +#define __arch__swab32(x) ___arch__swab32(x)
    1.67 +
    1.68 +/* The same, but returns the converted value from the location pointed to by addr. */
    1.69 +#define __arch__swab16p(addr) ld_le16(addr)
    1.70 +#define __arch__swab32p(addr) ld_le32(addr)
    1.71 +
    1.72 +/* The same, but do the conversion in situ, i.e. put the converted value back at addr. */
    1.73 +#define __arch__swab16s(addr) st_le16(addr,*addr)
    1.74 +#define __arch__swab32s(addr) st_le32(addr,*addr)
    1.75 +
    1.76 +#define __BYTEORDER_HAS_U64__
    1.77 +#ifndef __powerpc64__
    1.78 +#define __SWAB_64_THRU_32__
    1.79 +#endif /* __powerpc64__ */
    1.80 +
    1.81 +#include <xen/byteorder/big_endian.h>
    1.82 +
    1.83 +#endif /* _ASM_POWERPC_BYTEORDER_H */
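
The ld_le*/st_le* helpers above are byte-reversed loads and stores, and the rlwimi sequences assemble the same byte swaps that the generic ___swab16/___swab32 macros perform. A minimal plain-C sketch of those swaps, with illustrative names and assuming the __u16/__u32 types from xen/types.h:

    /* Illustrative only: plain-C versions of what the asm above computes. */
    static inline __u16 sketch_swab16(__u16 x)
    {
        return (__u16)((x << 8) | (x >> 8));        /* 0x1234 -> 0x3412 */
    }

    static inline __u32 sketch_swab32(__u32 x)
    {
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);           /* 0x12345678 -> 0x78563412 */
    }
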
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/include/asm-x86/byteorder.h	Wed Jan 17 12:11:02 2007 +0000
     2.3 @@ -0,0 +1,36 @@
     2.4 +#ifndef __ASM_X86_BYTEORDER_H__
     2.5 +#define __ASM_X86_BYTEORDER_H__
     2.6 +
     2.7 +#include <asm/types.h>
     2.8 +#include <xen/compiler.h>
     2.9 +
    2.10 +static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
    2.11 +{
    2.12 +    asm("bswap %0" : "=r" (x) : "0" (x));
    2.13 +    return x;
    2.14 +}
    2.15 +
    2.16 +static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
    2.17 +{ 
    2.18 +    union { 
    2.19 +        struct { __u32 a,b; } s;
    2.20 +        __u64 u;
    2.21 +    } v;
    2.22 +    v.u = val;
    2.23 +    asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 
    2.24 +        : "=r" (v.s.a), "=r" (v.s.b) 
    2.25 +        : "0" (v.s.a), "1" (v.s.b)); 
    2.26 +    return v.u;	
    2.27 +} 
    2.28 +
    2.29 +/* Do not define swab16.  Gcc is smart enough to recognize the "C" version and
    2.30 +   convert it into a rotation or exchange.  */
    2.31 +
    2.32 +#define __arch__swab64(x) ___arch__swab64(x)
    2.33 +#define __arch__swab32(x) ___arch__swab32(x)
    2.34 +
    2.35 +#define __BYTEORDER_HAS_U64__
    2.36 +
    2.37 +#include <xen/byteorder/little_endian.h>
    2.38 +
    2.39 +#endif /* __ASM_X86_BYTEORDER_H__ */
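
___arch__swab64 above must also work on 32-bit x86, where there is no 64-bit bswap, so it byte-swaps each 32-bit half with bswapl and exchanges them. A hedged C sketch of the same idea (the function name is illustrative, and __builtin_bswap32 is used only as a stand-in for the inline bswap):

    /* Illustrative only: swap a 64-bit value by swapping and exchanging its halves. */
    static inline __u64 sketch_swab64(__u64 val)
    {
        __u32 hi = (__u32)(val >> 32);
        __u32 lo = (__u32)val;
        /* __builtin_bswap32() stands in for the "bswapl" instructions above */
        return ((__u64)__builtin_bswap32(lo) << 32) | __builtin_bswap32(hi);
    }
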
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/include/xen/byteorder/big_endian.h	Wed Jan 17 12:11:02 2007 +0000
     3.3 @@ -0,0 +1,106 @@
     3.4 +#ifndef __XEN_BYTEORDER_BIG_ENDIAN_H__
     3.5 +#define __XEN_BYTEORDER_BIG_ENDIAN_H__
     3.6 +
     3.7 +#ifndef __BIG_ENDIAN
     3.8 +#define __BIG_ENDIAN 4321
     3.9 +#endif
    3.10 +#ifndef __BIG_ENDIAN_BITFIELD
    3.11 +#define __BIG_ENDIAN_BITFIELD
    3.12 +#endif
    3.13 +
    3.14 +#include <xen/types.h>
    3.15 +#include <xen/byteorder/swab.h>
    3.16 +
    3.17 +#define __constant_htonl(x) ((__force __be32)(__u32)(x))
    3.18 +#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
    3.19 +#define __constant_htons(x) ((__force __be16)(__u16)(x))
    3.20 +#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
    3.21 +#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
    3.22 +#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
    3.23 +#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
    3.24 +#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
    3.25 +#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
    3.26 +#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
    3.27 +#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
    3.28 +#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
    3.29 +#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
    3.30 +#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
    3.31 +#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
    3.32 +#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
    3.33 +#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
    3.34 +#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
    3.35 +#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
    3.36 +#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
    3.37 +#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
    3.38 +#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
    3.39 +#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
    3.40 +#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
    3.41 +#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
    3.42 +#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
    3.43 +#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
    3.44 +#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
    3.45 +
    3.46 +static inline __le64 __cpu_to_le64p(const __u64 *p)
    3.47 +{
    3.48 +    return (__force __le64)__swab64p(p);
    3.49 +}
    3.50 +static inline __u64 __le64_to_cpup(const __le64 *p)
    3.51 +{
    3.52 +    return __swab64p((__u64 *)p);
    3.53 +}
    3.54 +static inline __le32 __cpu_to_le32p(const __u32 *p)
    3.55 +{
    3.56 +    return (__force __le32)__swab32p(p);
    3.57 +}
    3.58 +static inline __u32 __le32_to_cpup(const __le32 *p)
    3.59 +{
    3.60 +    return __swab32p((__u32 *)p);
    3.61 +}
    3.62 +static inline __le16 __cpu_to_le16p(const __u16 *p)
    3.63 +{
    3.64 +    return (__force __le16)__swab16p(p);
    3.65 +}
    3.66 +static inline __u16 __le16_to_cpup(const __le16 *p)
    3.67 +{
    3.68 +    return __swab16p((__u16 *)p);
    3.69 +}
    3.70 +static inline __be64 __cpu_to_be64p(const __u64 *p)
    3.71 +{
    3.72 +    return (__force __be64)*p;
    3.73 +}
    3.74 +static inline __u64 __be64_to_cpup(const __be64 *p)
    3.75 +{
    3.76 +    return (__force __u64)*p;
    3.77 +}
    3.78 +static inline __be32 __cpu_to_be32p(const __u32 *p)
    3.79 +{
    3.80 +    return (__force __be32)*p;
    3.81 +}
    3.82 +static inline __u32 __be32_to_cpup(const __be32 *p)
    3.83 +{
    3.84 +    return (__force __u32)*p;
    3.85 +}
    3.86 +static inline __be16 __cpu_to_be16p(const __u16 *p)
    3.87 +{
    3.88 +    return (__force __be16)*p;
    3.89 +}
    3.90 +static inline __u16 __be16_to_cpup(const __be16 *p)
    3.91 +{
    3.92 +    return (__force __u16)*p;
    3.93 +}
    3.94 +#define __cpu_to_le64s(x) __swab64s((x))
    3.95 +#define __le64_to_cpus(x) __swab64s((x))
    3.96 +#define __cpu_to_le32s(x) __swab32s((x))
    3.97 +#define __le32_to_cpus(x) __swab32s((x))
    3.98 +#define __cpu_to_le16s(x) __swab16s((x))
    3.99 +#define __le16_to_cpus(x) __swab16s((x))
   3.100 +#define __cpu_to_be64s(x) do {} while (0)
   3.101 +#define __be64_to_cpus(x) do {} while (0)
   3.102 +#define __cpu_to_be32s(x) do {} while (0)
   3.103 +#define __be32_to_cpus(x) do {} while (0)
   3.104 +#define __cpu_to_be16s(x) do {} while (0)
   3.105 +#define __be16_to_cpus(x) do {} while (0)
   3.106 +
   3.107 +#include <xen/byteorder/generic.h>
   3.108 +
   3.109 +#endif /* __XEN_BYTEORDER_BIG_ENDIAN_H__ */
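
On a big-endian CPU the __cpu_to_be*/__be*_to_cpu forms above are plain casts and only the little-endian forms go through __swab*(). A short hedged usage sketch (values and function name are illustrative):

    /* Illustrative only: behaviour of the macros above on a big-endian build. */
    static inline void sketch_big_endian_usage(void)
    {
        __u32  host = 0x12345678;
        __be32 be   = __cpu_to_be32(host);   /* identity cast: bytes already in wire order */
        __le32 le   = __cpu_to_le32(host);   /* byte-swapped to 0x78563412 by __swab32()   */
        (void)be; (void)le;
    }
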
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/include/xen/byteorder/generic.h	Wed Jan 17 12:11:02 2007 +0000
     4.3 @@ -0,0 +1,68 @@
     4.4 +#ifndef __XEN_BYTEORDER_GENERIC_H__
     4.5 +#define __XEN_BYTEORDER_GENERIC_H__
     4.6 +
     4.7 +/*
     4.8 + * Generic Byte-reordering support
     4.9 + *
    4.10 + * The "... p" macros, like le64_to_cpup, can be used with pointers
    4.11 + * to unaligned data, but there will be a performance penalty on 
    4.12 + * some architectures.  Use get_unaligned for unaligned data.
    4.13 + *
    4.14 + * The following macros are to be defined by <asm/byteorder.h>:
    4.15 + *
    4.16 + * Conversion of XX-bit integers (16-, 32- or 64-bit)
    4.17 + * between native CPU format and little/big endian format
    4.18 + * 64-bit stuff only defined for proper architectures
    4.19 + *     cpu_to_[bl]eXX(__uXX x)
    4.20 + *     [bl]eXX_to_cpu(__uXX x)
    4.21 + *
    4.22 + * The same, but takes a pointer to the value to convert
    4.23 + *     cpu_to_[bl]eXXp(__uXX x)
    4.24 + *     [bl]eXX_to_cpup(__uXX x)
    4.25 + *
    4.26 + * The same, but change in situ
    4.27 + *     cpu_to_[bl]eXXs(__uXX x)
    4.28 + *     [bl]eXX_to_cpus(__uXX x)
    4.29 + *
    4.30 + * See asm-foo/byteorder.h for examples of how to provide
    4.31 + * architecture-optimized versions
    4.32 + */
    4.33 +
    4.34 +#define cpu_to_le64 __cpu_to_le64
    4.35 +#define le64_to_cpu __le64_to_cpu
    4.36 +#define cpu_to_le32 __cpu_to_le32
    4.37 +#define le32_to_cpu __le32_to_cpu
    4.38 +#define cpu_to_le16 __cpu_to_le16
    4.39 +#define le16_to_cpu __le16_to_cpu
    4.40 +#define cpu_to_be64 __cpu_to_be64
    4.41 +#define be64_to_cpu __be64_to_cpu
    4.42 +#define cpu_to_be32 __cpu_to_be32
    4.43 +#define be32_to_cpu __be32_to_cpu
    4.44 +#define cpu_to_be16 __cpu_to_be16
    4.45 +#define be16_to_cpu __be16_to_cpu
    4.46 +#define cpu_to_le64p __cpu_to_le64p
    4.47 +#define le64_to_cpup __le64_to_cpup
    4.48 +#define cpu_to_le32p __cpu_to_le32p
    4.49 +#define le32_to_cpup __le32_to_cpup
    4.50 +#define cpu_to_le16p __cpu_to_le16p
    4.51 +#define le16_to_cpup __le16_to_cpup
    4.52 +#define cpu_to_be64p __cpu_to_be64p
    4.53 +#define be64_to_cpup __be64_to_cpup
    4.54 +#define cpu_to_be32p __cpu_to_be32p
    4.55 +#define be32_to_cpup __be32_to_cpup
    4.56 +#define cpu_to_be16p __cpu_to_be16p
    4.57 +#define be16_to_cpup __be16_to_cpup
    4.58 +#define cpu_to_le64s __cpu_to_le64s
    4.59 +#define le64_to_cpus __le64_to_cpus
    4.60 +#define cpu_to_le32s __cpu_to_le32s
    4.61 +#define le32_to_cpus __le32_to_cpus
    4.62 +#define cpu_to_le16s __cpu_to_le16s
    4.63 +#define le16_to_cpus __le16_to_cpus
    4.64 +#define cpu_to_be64s __cpu_to_be64s
    4.65 +#define be64_to_cpus __be64_to_cpus
    4.66 +#define cpu_to_be32s __cpu_to_be32s
    4.67 +#define be32_to_cpus __be32_to_cpus
    4.68 +#define cpu_to_be16s __cpu_to_be16s
    4.69 +#define be16_to_cpus __be16_to_cpus
    4.70 +
    4.71 +#endif /* __XEN_BYTEORDER_GENERIC_H__ */
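
The comment block at the top of this header documents the interface most callers use: the unprefixed cpu_to_*/..._to_cpu names defined just above. A short hedged usage sketch, with an invented structure for illustration:

    /* Illustrative only: a hypothetical header that is always stored little-endian,
     * so it reads back identically on x86 and on big-endian PowerPC. */
    struct sketch_disk_header {
        __le32 magic;
        __le16 version;
    };

    static inline void sketch_fill(struct sketch_disk_header *h)
    {
        h->magic   = cpu_to_le32(0xfeedbeef);   /* no-op on LE hosts, swab on BE hosts */
        h->version = cpu_to_le16(1);
    }

    static inline int sketch_valid(const struct sketch_disk_header *h)
    {
        return le32_to_cpu(h->magic) == 0xfeedbeef;
    }
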
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/include/xen/byteorder/little_endian.h	Wed Jan 17 12:11:02 2007 +0000
     5.3 @@ -0,0 +1,106 @@
     5.4 +#ifndef __XEN_BYTEORDER_LITTLE_ENDIAN_H__
     5.5 +#define __XEN_BYTEORDER_LITTLE_ENDIAN_H__
     5.6 +
     5.7 +#ifndef __LITTLE_ENDIAN
     5.8 +#define __LITTLE_ENDIAN 1234
     5.9 +#endif
    5.10 +#ifndef __LITTLE_ENDIAN_BITFIELD
    5.11 +#define __LITTLE_ENDIAN_BITFIELD
    5.12 +#endif
    5.13 +
    5.14 +#include <xen/types.h>
    5.15 +#include <xen/byteorder/swab.h>
    5.16 +
    5.17 +#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
    5.18 +#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
    5.19 +#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
    5.20 +#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
    5.21 +#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
    5.22 +#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
    5.23 +#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
    5.24 +#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
    5.25 +#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
    5.26 +#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
    5.27 +#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
    5.28 +#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
    5.29 +#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
    5.30 +#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
    5.31 +#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
    5.32 +#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
    5.33 +#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
    5.34 +#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
    5.35 +#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
    5.36 +#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
    5.37 +#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
    5.38 +#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
    5.39 +#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
    5.40 +#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
    5.41 +#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
    5.42 +#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
    5.43 +#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
    5.44 +#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
    5.45 +
    5.46 +static inline __le64 __cpu_to_le64p(const __u64 *p)
    5.47 +{
    5.48 +    return (__force __le64)*p;
    5.49 +}
    5.50 +static inline __u64 __le64_to_cpup(const __le64 *p)
    5.51 +{
    5.52 +    return (__force __u64)*p;
    5.53 +}
    5.54 +static inline __le32 __cpu_to_le32p(const __u32 *p)
    5.55 +{
    5.56 +    return (__force __le32)*p;
    5.57 +}
    5.58 +static inline __u32 __le32_to_cpup(const __le32 *p)
    5.59 +{
    5.60 +    return (__force __u32)*p;
    5.61 +}
    5.62 +static inline __le16 __cpu_to_le16p(const __u16 *p)
    5.63 +{
    5.64 +    return (__force __le16)*p;
    5.65 +}
    5.66 +static inline __u16 __le16_to_cpup(const __le16 *p)
    5.67 +{
    5.68 +    return (__force __u16)*p;
    5.69 +}
    5.70 +static inline __be64 __cpu_to_be64p(const __u64 *p)
    5.71 +{
    5.72 +    return (__force __be64)__swab64p(p);
    5.73 +}
    5.74 +static inline __u64 __be64_to_cpup(const __be64 *p)
    5.75 +{
    5.76 +    return __swab64p((__u64 *)p);
    5.77 +}
    5.78 +static inline __be32 __cpu_to_be32p(const __u32 *p)
    5.79 +{
    5.80 +    return (__force __be32)__swab32p(p);
    5.81 +}
    5.82 +static inline __u32 __be32_to_cpup(const __be32 *p)
    5.83 +{
    5.84 +    return __swab32p((__u32 *)p);
    5.85 +}
    5.86 +static inline __be16 __cpu_to_be16p(const __u16 *p)
    5.87 +{
    5.88 +    return (__force __be16)__swab16p(p);
    5.89 +}
    5.90 +static inline __u16 __be16_to_cpup(const __be16 *p)
    5.91 +{
    5.92 +    return __swab16p((__u16 *)p);
    5.93 +}
    5.94 +#define __cpu_to_le64s(x) do {} while (0)
    5.95 +#define __le64_to_cpus(x) do {} while (0)
    5.96 +#define __cpu_to_le32s(x) do {} while (0)
    5.97 +#define __le32_to_cpus(x) do {} while (0)
    5.98 +#define __cpu_to_le16s(x) do {} while (0)
    5.99 +#define __le16_to_cpus(x) do {} while (0)
   5.100 +#define __cpu_to_be64s(x) __swab64s((x))
   5.101 +#define __be64_to_cpus(x) __swab64s((x))
   5.102 +#define __cpu_to_be32s(x) __swab32s((x))
   5.103 +#define __be32_to_cpus(x) __swab32s((x))
   5.104 +#define __cpu_to_be16s(x) __swab16s((x))
   5.105 +#define __be16_to_cpus(x) __swab16s((x))
   5.106 +
   5.107 +#include <xen/byteorder/generic.h>
   5.108 +
   5.109 +#endif /* __XEN_BYTEORDER_LITTLE_ENDIAN_H__ */
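
The __constant_* forms above expand to pure cast/shift expressions, so unlike the plain __cpu_to_*/__swab* forms (which may call the __fswab* inlines) they can be used wherever C requires a constant expression, e.g. in case labels or static initializers. A hedged sketch with illustrative names and values:

    /* Illustrative only: constant-expression conversions usable at translation time. */
    static const __le16 sketch_default_port = __constant_cpu_to_le16(80);

    static inline int sketch_is_http(__le16 port)
    {
        switch (port) {
        case __constant_cpu_to_le16(80):      /* folds to a plain integer constant */
            return 1;
        default:
            return 0;
        }
    }
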
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/include/xen/byteorder/swab.h	Wed Jan 17 12:11:02 2007 +0000
     6.3 @@ -0,0 +1,185 @@
     6.4 +#ifndef __XEN_BYTEORDER_SWAB_H__
     6.5 +#define __XEN_BYTEORDER_SWAB_H__
     6.6 +
     6.7 +/*
     6.8 + * Byte-swapping, independently from CPU endianness
     6.9 + *     swabXX[ps]?(foo)
    6.10 + *
    6.11 + * Francois-Rene Rideau <fare@tunes.org> 19971205
    6.12 + *    separated swab functions from cpu_to_XX,
    6.13 + *    to clean up support for bizarre-endian architectures.
    6.14 + */
    6.15 +
    6.16 +#include <xen/compiler.h>
    6.17 +
    6.18 +/* casts are necessary for constants, because we never know for sure
    6.19 + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
    6.20 + */
    6.21 +#define ___swab16(x)                                    \
    6.22 +({                                                      \
    6.23 +    __u16 __x = (x);                                    \
    6.24 +    ((__u16)(                                           \
    6.25 +        (((__u16)(__x) & (__u16)0x00ffU) << 8) |        \
    6.26 +        (((__u16)(__x) & (__u16)0xff00U) >> 8) ));      \
    6.27 +})
    6.28 +
    6.29 +#define ___swab32(x)                                            \
    6.30 +({                                                              \
    6.31 +    __u32 __x = (x);                                            \
    6.32 +    ((__u32)(                                                   \
    6.33 +        (((__u32)(__x) & (__u32)0x000000ffUL) << 24) |          \
    6.34 +        (((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) |          \
    6.35 +        (((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) |          \
    6.36 +        (((__u32)(__x) & (__u32)0xff000000UL) >> 24) ));        \
    6.37 +})
    6.38 +
    6.39 +#define ___swab64(x)                                                       \
    6.40 +({                                                                         \
    6.41 +    __u64 __x = (x);                                                       \
    6.42 +    ((__u64)(                                                              \
    6.43 +        (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) |     \
    6.44 +        (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) |     \
    6.45 +        (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) |     \
    6.46 +        (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) |     \
    6.47 +        (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) |     \
    6.48 +        (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) |     \
    6.49 +        (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) |     \
    6.50 +        (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) ));   \
    6.51 +})
    6.52 +
    6.53 +#define ___constant_swab16(x)                   \
    6.54 +    ((__u16)(                                   \
    6.55 +        (((__u16)(x) & (__u16)0x00ffU) << 8) |  \
    6.56 +        (((__u16)(x) & (__u16)0xff00U) >> 8) ))
    6.57 +#define ___constant_swab32(x)                           \
    6.58 +    ((__u32)(                                           \
    6.59 +        (((__u32)(x) & (__u32)0x000000ffUL) << 24) |    \
    6.60 +        (((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |    \
    6.61 +        (((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |    \
    6.62 +        (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
    6.63 +#define ___constant_swab64(x)                                            \
    6.64 +    ((__u64)(                                                            \
    6.65 +        (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |     \
    6.66 +        (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |     \
    6.67 +        (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |     \
    6.68 +        (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) |     \
    6.69 +        (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) |     \
    6.70 +        (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |     \
    6.71 +        (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |     \
    6.72 +        (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
    6.73 +
    6.74 +/*
    6.75 + * provide defaults when no architecture-specific optimization is detected
    6.76 + */
    6.77 +#ifndef __arch__swab16
    6.78 +#  define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
    6.79 +#endif
    6.80 +#ifndef __arch__swab32
    6.81 +#  define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
    6.82 +#endif
    6.83 +#ifndef __arch__swab64
    6.84 +#  define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
    6.85 +#endif
    6.86 +
    6.87 +#ifndef __arch__swab16p
    6.88 +#  define __arch__swab16p(x) __arch__swab16(*(x))
    6.89 +#endif
    6.90 +#ifndef __arch__swab32p
    6.91 +#  define __arch__swab32p(x) __arch__swab32(*(x))
    6.92 +#endif
    6.93 +#ifndef __arch__swab64p
    6.94 +#  define __arch__swab64p(x) __arch__swab64(*(x))
    6.95 +#endif
    6.96 +
    6.97 +#ifndef __arch__swab16s
    6.98 +#  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
    6.99 +#endif
   6.100 +#ifndef __arch__swab32s
   6.101 +#  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
   6.102 +#endif
   6.103 +#ifndef __arch__swab64s
   6.104 +#  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
   6.105 +#endif
   6.106 +
   6.107 +
   6.108 +/*
   6.109 + * Allow constant folding
   6.110 + */
   6.111 +#if defined(__GNUC__) && defined(__OPTIMIZE__)
   6.112 +#  define __swab16(x) \
   6.113 +(__builtin_constant_p((__u16)(x)) ? \
   6.114 + ___swab16((x)) : \
   6.115 + __fswab16((x)))
   6.116 +#  define __swab32(x) \
   6.117 +(__builtin_constant_p((__u32)(x)) ? \
   6.118 + ___swab32((x)) : \
   6.119 + __fswab32((x)))
   6.120 +#  define __swab64(x) \
   6.121 +(__builtin_constant_p((__u64)(x)) ? \
   6.122 + ___swab64((x)) : \
   6.123 + __fswab64((x)))
   6.124 +#else
   6.125 +#  define __swab16(x) __fswab16(x)
   6.126 +#  define __swab32(x) __fswab32(x)
   6.127 +#  define __swab64(x) __fswab64(x)
   6.128 +#endif /* OPTIMIZE */
   6.129 +
   6.130 +
   6.131 +static inline __attribute_const__ __u16 __fswab16(__u16 x)
   6.132 +{
   6.133 +    return __arch__swab16(x);
   6.134 +}
   6.135 +static inline __u16 __swab16p(const __u16 *x)
   6.136 +{
   6.137 +    return __arch__swab16p(x);
   6.138 +}
   6.139 +static inline void __swab16s(__u16 *addr)
   6.140 +{
   6.141 +    __arch__swab16s(addr);
   6.142 +}
   6.143 +
   6.144 +static inline __attribute_const__ __u32 __fswab32(__u32 x)
   6.145 +{
   6.146 +    return __arch__swab32(x);
   6.147 +}
   6.148 +static inline __u32 __swab32p(const __u32 *x)
   6.149 +{
   6.150 +    return __arch__swab32p(x);
   6.151 +}
   6.152 +static inline void __swab32s(__u32 *addr)
   6.153 +{
   6.154 +    __arch__swab32s(addr);
   6.155 +}
   6.156 +
   6.157 +#ifdef __BYTEORDER_HAS_U64__
   6.158 +static inline __attribute_const__ __u64 __fswab64(__u64 x)
   6.159 +{
   6.160 +#  ifdef __SWAB_64_THRU_32__
   6.161 +    __u32 h = x >> 32;
   6.162 +    __u32 l = x & ((1ULL<<32)-1);
   6.163 +    return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
   6.164 +#  else
   6.165 +    return __arch__swab64(x);
   6.166 +#  endif
   6.167 +}
   6.168 +static inline __u64 __swab64p(const __u64 *x)
   6.169 +{
   6.170 +    return __arch__swab64p(x);
   6.171 +}
   6.172 +static inline void __swab64s(__u64 *addr)
   6.173 +{
   6.174 +    __arch__swab64s(addr);
   6.175 +}
   6.176 +#endif /* __BYTEORDER_HAS_U64__ */
   6.177 +
   6.178 +#define swab16 __swab16
   6.179 +#define swab32 __swab32
   6.180 +#define swab64 __swab64
   6.181 +#define swab16p __swab16p
   6.182 +#define swab32p __swab32p
   6.183 +#define swab64p __swab64p
   6.184 +#define swab16s __swab16s
   6.185 +#define swab32s __swab32s
   6.186 +#define swab64s __swab64s
   6.187 +
   6.188 +#endif /* __XEN_BYTEORDER_SWAB_H__ */
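
When building with GCC and optimization, __swab16/32/64 above pick the pure-expression ___swab* form for compile-time constants (so the result can fold away) and the __fswab* inlines, which dispatch to __arch__swab*, for runtime values. A hedged sketch with illustrative names:

    /* Illustrative only. */
    #define SKETCH_MAGIC 0x12345678U

    static inline __u32 sketch_mix(__u32 runtime_val)
    {
        __u32 a = __swab32(SKETCH_MAGIC);     /* constant operand: folds to 0x78563412 */
        __u32 b = __swab32(runtime_val);      /* variable operand: calls __fswab32()   */
        return a ^ b;
    }
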
     7.1 --- a/xen/include/xen/config.h	Wed Jan 17 10:33:13 2007 +0000
     7.2 +++ b/xen/include/xen/config.h	Wed Jan 17 12:11:02 2007 +0000
     7.3 @@ -63,6 +63,8 @@
     7.4  /* Linux 'checker' project. */
     7.5  #define __iomem
     7.6  #define __user
     7.7 +#define __force
     7.8 +#define __bitwise
     7.9  
    7.10  #ifndef __ASSEMBLY__
    7.11  
     8.1 --- a/xen/include/xen/types.h	Wed Jan 17 10:33:13 2007 +0000
     8.2 +++ b/xen/include/xen/types.h	Wed Jan 17 12:11:02 2007 +0000
     8.3 @@ -51,4 +51,11 @@ typedef         __s64           int64_t;
     8.4  struct domain;
     8.5  struct vcpu;
     8.6  
     8.7 +typedef __u16 __le16;
     8.8 +typedef __u16 __be16;
     8.9 +typedef __u32 __le32;
    8.10 +typedef __u32 __be32;
    8.11 +typedef __u64 __le64;
    8.12 +typedef __u64 __be64;
    8.13 +
    8.14  #endif /* __TYPES_H__ */
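
Since the xen/config.h hunk in this changeset defines __force (and __bitwise) to nothing, and the __le*/__be* typedefs here are plain unsigned integers, the endianness annotations carried over from Linux compile away while keeping the headers source-compatible. A hedged sketch of what a cast in the headers above reduces to (names are illustrative):

    /* Illustrative only: with __force empty and __le32 == __u32 ...          */
    static __le32 sketch_wire = (__force __le32)0x11223344U;
    /* ... the line above is exactly equivalent to:                           */
    static __u32  sketch_host = (__u32)0x11223344U;
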