/* start for Xen */
#include <xen/init.h>
+#include <xen/byteswap.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <crypto/vmac.h>
* MUL64: 64x64->128-bit multiplication
* PMUL64: assumes top bits cleared on inputs
* ADD128: 128x128->128-bit addition
- * GET_REVERSED_64: load and byte-reverse 64-bit word
* ----------------------------------------------------------------------- */
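/*
 * Illustrative usage sketch (not part of this change): the NH inner loop
 * in this file combines these macros roughly as
 *
 *     uint64_t th, tl;
 *     MUL64(th, tl, get64PE(mp + i) + kp[i], get64PE(mp + i + 1) + kp[i + 1]);
 *     ADD128(rh, rl, th, tl);
 *
 * i.e. one 64x64->128-bit product accumulated into the 128-bit (rh,rl) pair.
 */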
/* ----------------------------------------------------------------------- */
#define PMUL64 MUL64
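/* With a native 64x64->128-bit multiply, the "top bits cleared" shortcut
 * buys nothing, so PMUL64 simply aliases MUL64 here. */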
-#define GET_REVERSED_64(p) \
- ({uint64_t x; \
- asm ("bswapq %0" : "=r" (x) : "0"(*(uint64_t *)(p))); x;})
-
-/* ----------------------------------------------------------------------- */
-#elif (__GNUC__ && __i386__)
-/* ----------------------------------------------------------------------- */
-
-#define GET_REVERSED_64(p) \
- ({ uint64_t x; \
- uint32_t *tp = (uint32_t *)(p); \
- asm ("bswap %%edx\n\t" \
- "bswap %%eax" \
- : "=A"(x) \
- : "a"(tp[1]), "d"(tp[0])); \
- x; })
/* ----------------------------------------------------------------------- */
#elif (__GNUC__ && __ppc64__)
#define PMUL64 MUL64
-#define GET_REVERSED_64(p) \
- ({ uint32_t hi, lo, *_p = (uint32_t *)(p); \
- asm volatile ("lwbrx %0, %1, %2" : "=r"(lo) : "b%"(0), "r"(_p) ); \
- asm volatile ("lwbrx %0, %1, %2" : "=r"(hi) : "b%"(4), "r"(_p) ); \
- ((uint64_t)hi << 32) | (uint64_t)lo; } )
-
-/* ----------------------------------------------------------------------- */
-#elif (__GNUC__ && (__ppc__ || __PPC__))
-/* ----------------------------------------------------------------------- */
-
-#define GET_REVERSED_64(p) \
- ({ uint32_t hi, lo, *_p = (uint32_t *)(p); \
- asm volatile ("lwbrx %0, %1, %2" : "=r"(lo) : "b%"(0), "r"(_p) ); \
- asm volatile ("lwbrx %0, %1, %2" : "=r"(hi) : "b%"(4), "r"(_p) ); \
- ((uint64_t)hi << 32) | (uint64_t)lo; } )
-
-/* ----------------------------------------------------------------------- */
-#elif (__GNUC__ && (__ARMEL__ || __ARM__))
-/* ----------------------------------------------------------------------- */
-
-#define bswap32(v) \
-({ uint32_t tmp,out; \
- asm volatile( \
- "eor %1, %2, %2, ror #16\n" \
- "bic %1, %1, #0x00ff0000\n" \
- "mov %0, %2, ror #8\n" \
- "eor %0, %0, %1, lsr #8" \
- : "=r" (out), "=&r" (tmp) \
- : "r" (v)); \
- out;})
-
/* ----------------------------------------------------------------------- */
#elif _MSC_VER
/* ----------------------------------------------------------------------- */
(rh) += (ih) + ((rl) < (_il)); \
}
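/* In ADD128 above, (rl) < (_il) after the low-word addition is 1 exactly
 * when that addition wrapped mod 2^64, i.e. it recovers the carry into
 * the high word. */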
-#if _MSC_VER >= 1300
-#define GET_REVERSED_64(p) _byteswap_uint64(*(uint64_t *)(p))
-#pragma intrinsic(_byteswap_uint64)
-#endif
-
#if _MSC_VER >= 1400 && \
(!defined(__INTEL_COMPILER) || __INTEL_COMPILER >= 1000)
#define MUL32(i1,i2) (__emulu((uint32_t)(i1),(uint32_t)(i2)))
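/* __emulu() is MSVC's intrinsic for an unsigned 32x32->64-bit multiply,
 * avoiding the truncation of a plain 32-bit C multiplication. */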
}
#endif
-#ifndef GET_REVERSED_64
-#ifndef bswap64
-#ifndef bswap32
-#define bswap32(x) \
- ({ uint32_t bsx = (x); \
- ((((bsx) & 0xff000000u) >> 24) | (((bsx) & 0x00ff0000u) >> 8) | \
- (((bsx) & 0x0000ff00u) << 8) | (((bsx) & 0x000000ffu) << 24)); })
-#endif
-#define bswap64(x) \
- ({ union { uint64_t ll; uint32_t l[2]; } w, r; \
- w.ll = (x); \
- r.l[0] = bswap32 (w.l[1]); \
- r.l[1] = bswap32 (w.l[0]); \
- r.ll; })
-#endif
-#define GET_REVERSED_64(p) bswap64(*(uint64_t *)(p))
-#endif
-
/* ----------------------------------------------------------------------- */
#if (VMAC_PREFER_BIG_ENDIAN)
# define get64PE get64BE
#else
# define get64PE get64LE
#endif

#if (VMAC_ARCH_BIG_ENDIAN)
# define get64BE(ptr) (*(uint64_t *)(ptr))
-# define get64LE(ptr) GET_REVERSED_64(ptr)
+# define get64LE(ptr) bswap64(*(uint64_t *)(ptr))
#else /* assume little-endian */
-# define get64BE(ptr) GET_REVERSED_64(ptr)
+# define get64BE(ptr) bswap64(*(uint64_t *)(ptr))
# define get64LE(ptr) (*(uint64_t *)(ptr))
#endif
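/*
 * Illustrative sketch, not part of this change: with xen/byteswap.h
 * included above, bswap64() is assumed to reduce to a compiler builtin
 * along the lines of
 *
 *     #define bswap64(x) __builtin_bswap64(x)
 *
 * so on a little-endian build get64BE() compiles to a single load plus
 * byte swap, matching what the removed GET_REVERSED_64() asm variants
 * produced by hand.
 */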