From bf08a8a08a2ee8cef5fb7b3b274b0e09123a41bd Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 3 Jan 2020 17:04:41 +0100 Subject: [PATCH] x86/HVM: use single (atomic) MOV for aligned emulated writes Using memcpy() may result in multiple individual byte accesses (depending on how memcpy() is implemented and how the resulting insns, e.g. REP MOVSB, get carried out in hardware), which isn't what we want/need for carrying out guest insns as correctly as possible. Fall back to memcpy() only for accesses not 2, 4, or 8 bytes in size. Suggested-by: Andrew Cooper Signed-off-by: Jan Beulich Acked-by: Andrew Cooper --- xen/arch/x86/hvm/emulate.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index 637034b6a1..a3aa33a44f 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -1342,7 +1342,14 @@ static int hvmemul_write( if ( !mapping ) return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt); - memcpy(mapping, p_data, bytes); + /* Where possible use single (and hence generally atomic) MOV insns. */ + switch ( bytes ) + { + case 2: write_u16_atomic(mapping, *(uint16_t *)p_data); break; + case 4: write_u32_atomic(mapping, *(uint32_t *)p_data); break; + case 8: write_u64_atomic(mapping, *(uint64_t *)p_data); break; + default: memcpy(mapping, p_data, bytes); break; + } hvmemul_unmap_linear_addr(mapping, addr, bytes, hvmemul_ctxt); -- 2.39.5