#include <xen/byteorder/swab.h>
#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
-#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
+#define __constant_le64_to_cpu(x) ___constant_swab64((__force uint64_t)(__le64)(x))
#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
#define __constant_le32_to_cpu(x) ___constant_swab32((__force uint32_t)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
#define __constant_le16_to_cpu(x) ___constant_swab16((__force uint16_t)(__le16)(x))
-#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
-#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __constant_cpu_to_be64(x) ((__force __be64)(uint64_t)(x))
+#define __constant_be64_to_cpu(x) ((__force uint64_t)(__be64)(x))
#define __constant_cpu_to_be32(x) ((__force __be32)(uint32_t)(x))
#define __constant_be32_to_cpu(x) ((__force uint32_t)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)(uint16_t)(x))
#define __constant_be16_to_cpu(x) ((__force uint16_t)(__be16)(x))
#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
-#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+#define __le64_to_cpu(x) __swab64((__force uint64_t)(__le64)(x))
#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
#define __le32_to_cpu(x) __swab32((__force uint32_t)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
#define __le16_to_cpu(x) __swab16((__force uint16_t)(__le16)(x))
-#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
-#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+#define __cpu_to_be64(x) ((__force __be64)(uint64_t)(x))
+#define __be64_to_cpu(x) ((__force uint64_t)(__be64)(x))
#define __cpu_to_be32(x) ((__force __be32)(uint32_t)(x))
#define __be32_to_cpu(x) ((__force uint32_t)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)(uint16_t)(x))
#define __be16_to_cpu(x) ((__force uint16_t)(__be16)(x))
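The __force casts in these macros exist for sparse's bitwise type checking: __le64/__be64 and friends are distinct restricted types, and __force silences the conversion warning at the one spot where the cast is deliberate. A minimal standalone sketch of that annotation pattern (the real definitions live in Xen's type/compiler headers; this is an illustration, not the Xen source):

#include <stdint.h>

#ifdef __CHECKER__                       /* defined when sparse runs */
# define __bitwise __attribute__((bitwise))
# define __force   __attribute__((force))
#else                                    /* plain compilers: no-ops */
# define __bitwise
# define __force
#endif

typedef uint64_t __bitwise __le64;       /* distinct type under sparse */

static inline __le64 cpu_to_le64_sketch(uint64_t x)
{
    /* Without __force, sparse would warn about mixing a plain
     * uint64_t with the restricted __le64 type.  The identity cast
     * is only correct on a little-endian CPU. */
    return (__force __le64)x;
}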
-static inline __le64 __cpu_to_le64p(const __u64 *p)
+static inline __le64 __cpu_to_le64p(const uint64_t *p)
{
return (__force __le64)__swab64p(p);
}
-static inline __u64 __le64_to_cpup(const __le64 *p)
+static inline uint64_t __le64_to_cpup(const __le64 *p)
{
- return __swab64p((__u64 *)p);
+ return __swab64p((const uint64_t *)p);
}
static inline __le32 __cpu_to_le32p(const uint32_t *p)
{
{
return __swab16p((const uint16_t *)p);
}
-static inline __be64 __cpu_to_be64p(const __u64 *p)
+static inline __be64 __cpu_to_be64p(const uint64_t *p)
{
return (__force __be64)*p;
}
-static inline __u64 __be64_to_cpup(const __be64 *p)
+static inline uint64_t __be64_to_cpup(const __be64 *p)
{
- return (__force __u64)*p;
+ return (__force uint64_t)*p;
}
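To make the mapping in this big-endian header concrete: the cpu_to_be* conversions are identity casts, while the cpu_to_le* ones byte-swap. A standalone sketch with plain uint64_t and no __bitwise annotations, where swab64() is a local stand-in for Xen's __swab64:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t swab64(uint64_t x)   /* same shift/mask dance as ___swab64 */
{
    return ((x & 0x00000000000000ffULL) << 56) |
           ((x & 0x000000000000ff00ULL) << 40) |
           ((x & 0x0000000000ff0000ULL) << 24) |
           ((x & 0x00000000ff000000ULL) <<  8) |
           ((x & 0x000000ff00000000ULL) >>  8) |
           ((x & 0x0000ff0000000000ULL) >> 24) |
           ((x & 0x00ff000000000000ULL) >> 40) |
           ((x & 0xff00000000000000ULL) >> 56);
}

int main(void)
{
    uint64_t v = 0x0123456789abcdefULL;

    /* Big-endian rules, as in the header above. */
    uint64_t le = swab64(v);             /* __cpu_to_le64: swap */
    uint64_t be = v;                     /* __cpu_to_be64: identity */

    printf("cpu %016" PRIx64 " -> le %016" PRIx64 ", be %016" PRIx64 "\n",
           v, le, be);
    return 0;
}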
static inline __be32 __cpu_to_be32p(const uint32_t *p)
{
#include <xen/types.h>
#include <xen/byteorder/swab.h>
-#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
-#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __constant_cpu_to_le64(x) ((__force __le64)(uint64_t)(x))
+#define __constant_le64_to_cpu(x) ((__force uint64_t)(__le64)(x))
#define __constant_cpu_to_le32(x) ((__force __le32)(uint32_t)(x))
#define __constant_le32_to_cpu(x) ((__force uint32_t)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)(uint16_t)(x))
#define __constant_le16_to_cpu(x) ((__force uint16_t)(__le16)(x))
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
-#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
+#define __constant_be64_to_cpu(x) ___constant_swab64((__force uint64_t)(__be64)(x))
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
#define __constant_be32_to_cpu(x) ___constant_swab32((__force uint32_t)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
#define __constant_be16_to_cpu(x) ___constant_swab16((__force uint16_t)(__be16)(x))
-#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
-#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+#define __cpu_to_le64(x) ((__force __le64)(uint64_t)(x))
+#define __le64_to_cpu(x) ((__force uint64_t)(__le64)(x))
#define __cpu_to_le32(x) ((__force __le32)(uint32_t)(x))
#define __le32_to_cpu(x) ((__force uint32_t)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)(uint16_t)(x))
#define __le16_to_cpu(x) ((__force uint16_t)(__le16)(x))
#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
-#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+#define __be64_to_cpu(x) __swab64((__force uint64_t)(__be64)(x))
#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
#define __be32_to_cpu(x) __swab32((__force uint32_t)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force uint16_t)(__be16)(x))
-static inline __le64 __cpu_to_le64p(const __u64 *p)
+static inline __le64 __cpu_to_le64p(const uint64_t *p)
{
return (__force __le64)*p;
}
-static inline __u64 __le64_to_cpup(const __le64 *p)
+static inline uint64_t __le64_to_cpup(const __le64 *p)
{
- return (__force __u64)*p;
+ return (__force uint64_t)*p;
}
static inline __le32 __cpu_to_le32p(const uint32_t *p)
{
{
return (__force uint16_t)*p;
}
-static inline __be64 __cpu_to_be64p(const __u64 *p)
+static inline __be64 __cpu_to_be64p(const uint64_t *p)
{
return (__force __be64)__swab64p(p);
}
-static inline __u64 __be64_to_cpup(const __be64 *p)
+static inline uint64_t __be64_to_cpup(const __be64 *p)
{
- return __swab64p((__u64 *)p);
+ return __swab64p((const uint64_t *)p);
}
static inline __be32 __cpu_to_be32p(const uint32_t *p)
{
* to clean up support for bizarre-endian architectures.
*/
-/*
- * Casts are necessary for constants, because we never know for sure how
- * UL/ULL map to __u64. At least not in a portable way.
- */
#define ___swab16(x) \
({ \
uint16_t x_ = (x); \
#define ___swab64(x) \
({ \
- __u64 __x = (x); \
- ((__u64)( \
- (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
- (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
- (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
- (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
- (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
- (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
- (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
- (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
+ uint64_t x_ = (x); \
+ (uint64_t)( \
+ (((uint64_t)(x_) & 0x00000000000000ffULL) << 56) | \
+ (((uint64_t)(x_) & 0x000000000000ff00ULL) << 40) | \
+ (((uint64_t)(x_) & 0x0000000000ff0000ULL) << 24) | \
+ (((uint64_t)(x_) & 0x00000000ff000000ULL) << 8) | \
+ (((uint64_t)(x_) & 0x000000ff00000000ULL) >> 8) | \
+ (((uint64_t)(x_) & 0x0000ff0000000000ULL) >> 24) | \
+ (((uint64_t)(x_) & 0x00ff000000000000ULL) >> 40) | \
+ (((uint64_t)(x_) & 0xff00000000000000ULL) >> 56)); \
})
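The statement-expression form matters here: the argument is expanded exactly once into the temporary x_, so something with side effects, say ___swab64(*p++), is evaluated a single time. A hedged 16-bit illustration of the hazard the temporary avoids (BAD_SWAB16/GOOD_SWAB16 are local names, not Xen macros):

#include <stdint.h>
#include <assert.h>

/* Naive macro: each mask line re-evaluates its argument. */
#define BAD_SWAB16(x) \
    ((uint16_t)((((uint16_t)(x) & 0x00ffU) << 8) | \
                (((uint16_t)(x) & 0xff00U) >> 8)))

/* Statement-expression style, like ___swab64 above: one evaluation. */
#define GOOD_SWAB16(x) \
({ \
    uint16_t x_ = (x); \
    (uint16_t)(((x_ & 0x00ffU) << 8) | ((x_ & 0xff00U) >> 8)); \
})

int main(void)
{
    uint16_t buf[2] = { 0x1234, 0x5678 };
    const uint16_t *p = buf;

    /* p is incremented exactly once here... */
    assert(GOOD_SWAB16(*p++) == 0x3412);
    /* ...whereas BAD_SWAB16(*p++) would increment p several times,
     * mixing bytes from different array elements.  (Not executed.) */
    (void)p;
    return 0;
}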
#define ___constant_swab16(x) \
(((uint32_t)(x) & 0x00ff0000U) >> 8) | \
(((uint32_t)(x) & 0xff000000U) >> 24)))
#define ___constant_swab64(x) \
- ((__u64)( \
- (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
- (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
- (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
- (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
- (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
- (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
- (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
- (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
+ ((uint64_t)( \
+ (((uint64_t)(x) & 0x00000000000000ffULL) << 56) | \
+ (((uint64_t)(x) & 0x000000000000ff00ULL) << 40) | \
+ (((uint64_t)(x) & 0x0000000000ff0000ULL) << 24) | \
+ (((uint64_t)(x) & 0x00000000ff000000ULL) << 8) | \
+ (((uint64_t)(x) & 0x000000ff00000000ULL) >> 8) | \
+ (((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24) | \
+ (((uint64_t)(x) & 0x00ff000000000000ULL) >> 40) | \
+ (((uint64_t)(x) & 0xff00000000000000ULL) >> 56)))
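Unlike the statement-expression ___swab64, ___constant_swab64 is a plain expression, so it remains usable where an integer constant expression is required: static initializers, case labels, compile-time asserts. A small sketch, with LOCAL_CONSTANT_SWAB64 as a hypothetical local copy of the macro:

#include <stdint.h>

#define LOCAL_CONSTANT_SWAB64(x) \
    ((uint64_t)( \
        (((uint64_t)(x) & 0x00000000000000ffULL) << 56) | \
        (((uint64_t)(x) & 0x000000000000ff00ULL) << 40) | \
        (((uint64_t)(x) & 0x0000000000ff0000ULL) << 24) | \
        (((uint64_t)(x) & 0x00000000ff000000ULL) <<  8) | \
        (((uint64_t)(x) & 0x000000ff00000000ULL) >>  8) | \
        (((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24) | \
        (((uint64_t)(x) & 0x00ff000000000000ULL) >> 40) | \
        (((uint64_t)(x) & 0xff00000000000000ULL) >> 56)))

/* Legal only because the macro is a constant expression: a GCC
 * statement expression could not initialise a static object. */
static const uint64_t wire_magic =
    LOCAL_CONSTANT_SWAB64(0x0123456789abcdefULL);

_Static_assert(LOCAL_CONSTANT_SWAB64(0x0123456789abcdefULL) ==
               0xefcdab8967452301ULL, "swab64 folds at compile time");

int main(void)
{
    return wire_magic == 0xefcdab8967452301ULL ? 0 : 1;
}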
/*
* provide defaults when no architecture-specific optimization is detected
# define __arch__swab32(x) ___swab32(x)
#endif
#ifndef __arch__swab64
-# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
+# define __arch__swab64(x) ___swab64(x)
#endif
#ifndef __arch__swab16p
___swab32((x)) : \
__fswab32((x)))
# define __swab64(x) \
-(__builtin_constant_p((__u64)(x)) ? \
+(__builtin_constant_p((uint64_t)(x)) ? \
___swab64((x)) : \
__fswab64((x)))
#else
}
#ifdef __BYTEORDER_HAS_U64__
-static inline attr_const __u64 __fswab64(__u64 x)
+static inline attr_const uint64_t __fswab64(uint64_t x)
{
# ifdef __SWAB_64_THRU_32__
uint32_t h = x >> 32, l = x;
- return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
+ return ((uint64_t)__swab32(l) << 32) | __swab32(h);
# else
return __arch__swab64(x);
# endif
}
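The __SWAB_64_THRU_32__ path builds the 64-bit swap from two 32-bit swaps: swap each half, then exchange the halves. A standalone check of that decomposition, where swab32()/swab64_thru_32() are local stand-ins for __swab32/__fswab64:

#include <stdint.h>
#include <assert.h>

static uint32_t swab32(uint32_t x)
{
    return ((x & 0x000000ffU) << 24) |
           ((x & 0x0000ff00U) <<  8) |
           ((x & 0x00ff0000U) >>  8) |
           ((x & 0xff000000U) >> 24);
}

static uint64_t swab64_thru_32(uint64_t x)
{
    uint32_t h = x >> 32, l = x;         /* split into halves */
    /* Swap each half, then swap the halves, as in __fswab64 above:
     * the low half ends up swapped in the high position. */
    return ((uint64_t)swab32(l) << 32) | swab32(h);
}

int main(void)
{
    assert(swab64_thru_32(0x0123456789abcdefULL) == 0xefcdab8967452301ULL);
    return 0;
}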
-static inline __u64 __swab64p(const __u64 *x)
+static inline uint64_t __swab64p(const uint64_t *x)
{
return __arch__swab64p(x);
}
-static inline void __swab64s(__u64 *addr)
+static inline void __swab64s(uint64_t *addr)
{
__arch__swab64s(addr);
}