bool lock,
struct x86_emulate_ctxt *ctxt)
{
- /* Fix this in case the guest is really relying on r-m-w atomicity. */
- return hvmemul_write(seg, offset, p_new, bytes, ctxt);
+ struct hvm_emulate_ctxt *hvmemul_ctxt =
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+ struct vcpu *curr = current;
+ unsigned long addr, reps = 1;
+ uint32_t pfec = PFEC_page_present | PFEC_write_access;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+ int rc;
+ void *mapping = NULL;
+
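+ /* Translate seg:offset into a linear address, with segmentation checks. */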
+ rc = hvmemul_virtual_to_linear(
+ seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+
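+ /*
+  * Derive the page fault error code: system segment accesses are implicit
+  * supervisor ones, while ordinary accesses honour the guest's CPL.
+  */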
+ if ( is_x86_system_segment(seg) )
+ pfec |= PFEC_implicit;
+ else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
+ pfec |= PFEC_user_mode;
+
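+ /* Map the guest range; a non-error NULL result indicates MMIO. */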
+ mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+ if ( IS_ERR(mapping) )
+ return ~PTR_ERR(mapping);
+
+ if ( !mapping )
+ {
+ /* Fix this in case the guest is really relying on r-m-w atomicity. */
+ return hvmemul_linear_mmio_write(addr, bytes, p_new, pfec,
+ hvmemul_ctxt,
+ vio->mmio_access.write_access &&
+ vio->mmio_gla == (addr & PAGE_MASK));
+ }
+
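+ /* Carry out the actual CMPXCHG, LOCKed only if the guest's instruction was. */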
+ switch ( bytes )
+ {
+ case 1: case 2: case 4: case 8:
+ {
+ unsigned long old = 0, new = 0, cur;
+
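+ /* old/new are pre-zeroed, so these copies zero-extend the operands. */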
+ memcpy(&old, p_old, bytes);
+ memcpy(&new, p_new, bytes);
+ if ( lock )
+ cur = __cmpxchg(mapping, old, new, bytes);
+ else
+ cur = cmpxchg_local_(mapping, old, new, bytes);
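+ /* On mismatch, hand the value actually observed back via p_old. */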
+ if ( cur != old )
+ {
+ memcpy(p_old, &cur, bytes);
+ rc = X86EMUL_CMPXCHG_FAILED;
+ }
+ break;
+ }
+
+ case 16:
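+ /* A 16-byte CMPXCHG requires CMPXCHG16B, i.e. the CX16 feature. */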
+ if ( cpu_has_cx16 )
+ {
+ __uint128_t *old = p_old, cur;
+
+ if ( lock )
+ cur = __cmpxchg16b(mapping, old, p_new);
+ else
+ cur = cmpxchg16b_local_(mapping, old, p_new);
+ if ( cur != *old )
+ {
+ *old = cur;
+ rc = X86EMUL_CMPXCHG_FAILED;
+ }
+ }
+ else
+ rc = X86EMUL_UNHANDLEABLE;
+ break;
+
+ default:
+ ASSERT_UNREACHABLE();
+ rc = X86EMUL_UNHANDLEABLE;
+ break;
+ }
+
+ hvmemul_unmap_linear_addr(mapping, addr, bytes, hvmemul_ctxt);
+
+ return rc;
}

static int hvmemul_validate(
return old;
}
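
+/* Non-LOCKed variant of __cmpxchg(): plain CMPXCHG without a bus lock. */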
+static always_inline unsigned long cmpxchg_local_(
+ void *ptr, unsigned long old, unsigned long new, unsigned int size)
+{
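+ /* Start out different from "old" so an unhandled size reads as a failed compare. */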
+ unsigned long prev = ~old;
+
+ switch ( size )
+ {
+ case 1:
+ asm volatile ( "cmpxchgb %b2, %1"
+ : "=a" (prev), "+m" (*(uint8_t *)ptr)
+ : "q" (new), "0" (old) );
+ break;
+ case 2:
+ asm volatile ( "cmpxchgw %w2, %1"
+ : "=a" (prev), "+m" (*(uint16_t *)ptr)
+ : "r" (new), "0" (old) );
+ break;
+ case 4:
+ asm volatile ( "cmpxchgl %k2, %1"
+ : "=a" (prev), "+m" (*(uint32_t *)ptr)
+ : "r" (new), "0" (old) );
+ break;
+ case 8:
+ asm volatile ( "cmpxchgq %2, %1"
+ : "=a" (prev), "+m" (*(uint64_t *)ptr)
+ : "r" (new), "0" (old) );
+ break;
+ }
+
+ return prev;
+}
+
#define cmpxchgptr(ptr,o,n) ({ \
const __typeof__(**(ptr)) *__o = (o); \
__typeof__(**(ptr)) *__n = (n); \
return prev.raw;
}
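
+/* Non-LOCKed variant of __cmpxchg16b(). */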
+static always_inline __uint128_t cmpxchg16b_local_(
+ void *ptr, const __uint128_t *oldp, const __uint128_t *newp)
+{
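+ /* CMPXCHG16B works on RDX:RAX and RCX:RBX pairs; split the 128-bit values. */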
+ union {
+ struct { uint64_t lo, hi; };
+ __uint128_t raw;
+ } new = { .raw = *newp }, old = { .raw = *oldp }, prev;
+
+ ASSERT(cpu_has_cx16);
+
+ /* Don't use "=A" here - clang can't deal with that. */
+ asm volatile ( "cmpxchg16b %2"
+ : "=d" (prev.hi), "=a" (prev.lo), "+m" (*(__uint128_t *)ptr)
+ : "c" (new.hi), "b" (new.lo), "0" (old.hi), "1" (old.lo) );
+
+ return prev.raw;
+}
+
#define cmpxchg16b(ptr, o, n) ({ \
volatile void *_p = (ptr); \
ASSERT(!((unsigned long)_p & 0xf)); \