ia64/xen-unstable
changeset 17481:d178c5ee6822
x86_emulate: Emulate MMX movq instructions.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Thu Apr 17 10:46:54 2008 +0100 (2008-04-17) |
parents | a38a41de0800 |
children | e14fee5ecc80 |
files | xen/arch/x86/hvm/emulate.c xen/arch/x86/x86_emulate/x86_emulate.c xen/arch/x86/x86_emulate/x86_emulate.h |
line diff
1.1 --- a/xen/arch/x86/hvm/emulate.c Wed Apr 16 16:42:47 2008 +0100 1.2 +++ b/xen/arch/x86/hvm/emulate.c Thu Apr 17 10:46:54 2008 +0100 1.3 @@ -674,16 +674,33 @@ static int hvmemul_inject_sw_interrupt( 1.4 return X86EMUL_OKAY; 1.5 } 1.6 1.7 -static void hvmemul_get_fpu( 1.8 +static int hvmemul_get_fpu( 1.9 void (*exception_callback)(void *, struct cpu_user_regs *), 1.10 void *exception_callback_arg, 1.11 + enum x86_emulate_fpu_type type, 1.12 struct x86_emulate_ctxt *ctxt) 1.13 { 1.14 struct vcpu *curr = current; 1.15 + 1.16 + switch ( type ) 1.17 + { 1.18 + case X86EMUL_FPU_fpu: 1.19 + break; 1.20 + case X86EMUL_FPU_mmx: 1.21 + if ( !cpu_has_mmx ) 1.22 + return X86EMUL_UNHANDLEABLE; 1.23 + break; 1.24 + default: 1.25 + return X86EMUL_UNHANDLEABLE; 1.26 + } 1.27 + 1.28 if ( !curr->fpu_dirtied ) 1.29 hvm_funcs.fpu_dirty_intercept(); 1.30 + 1.31 curr->arch.hvm_vcpu.fpu_exception_callback = exception_callback; 1.32 curr->arch.hvm_vcpu.fpu_exception_callback_arg = exception_callback_arg; 1.33 + 1.34 + return X86EMUL_OKAY; 1.35 } 1.36 1.37 static void hvmemul_put_fpu(
2.1 --- a/xen/arch/x86/x86_emulate/x86_emulate.c Wed Apr 16 16:42:47 2008 +0100 2.2 +++ b/xen/arch/x86/x86_emulate/x86_emulate.c Thu Apr 17 10:46:54 2008 +0100 2.3 @@ -195,9 +195,9 @@ static uint8_t twobyte_table[256] = { 2.4 /* 0x50 - 0x5F */ 2.5 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.6 /* 0x60 - 0x6F */ 2.7 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.8 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps|ModRM, 2.9 /* 0x70 - 0x7F */ 2.10 - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.11 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps|ModRM, 2.12 /* 0x80 - 0x87 */ 2.13 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 2.14 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 2.15 @@ -558,41 +558,48 @@ static void fpu_handle_exception(void *_ 2.16 regs->eip += fic->insn_bytes; 2.17 } 2.18 2.19 +#define get_fpu(_type, _fic) \ 2.20 +do{ (_fic)->exn_raised = 0; \ 2.21 + fail_if(ops->get_fpu == NULL); \ 2.22 + rc = ops->get_fpu(fpu_handle_exception, _fic, _type, ctxt); \ 2.23 + if ( rc ) goto done; \ 2.24 +} while (0) 2.25 +#define put_fpu(_fic) \ 2.26 +do{ \ 2.27 + if ( ops->put_fpu != NULL ) \ 2.28 + ops->put_fpu(ctxt); \ 2.29 + generate_exception_if((_fic)->exn_raised, EXC_MF, -1); \ 2.30 +} while (0) 2.31 + 2.32 #define emulate_fpu_insn(_op) \ 2.33 -do{ struct fpu_insn_ctxt fic = { 0 }; \ 2.34 - fail_if(ops->get_fpu == NULL); \ 2.35 - ops->get_fpu(fpu_handle_exception, &fic, ctxt); \ 2.36 +do{ struct fpu_insn_ctxt fic; \ 2.37 + get_fpu(X86EMUL_FPU_fpu, &fic); \ 2.38 asm volatile ( \ 2.39 "movb $2f-1f,%0 \n" \ 2.40 "1: " _op " \n" \ 2.41 "2: \n" \ 2.42 : "=m" (fic.insn_bytes) : : "memory" ); \ 2.43 - ops->put_fpu(ctxt); \ 2.44 - generate_exception_if(fic.exn_raised, EXC_MF, -1); \ 2.45 + put_fpu(&fic); \ 2.46 } while (0) 2.47 2.48 #define emulate_fpu_insn_memdst(_op, _arg) \ 2.49 -do{ struct fpu_insn_ctxt fic = { 0 }; \ 2.50 - fail_if(ops->get_fpu == NULL); \ 2.51 - ops->get_fpu(fpu_handle_exception, &fic, ctxt); \ 2.52 +do{ struct fpu_insn_ctxt fic; \ 2.53 + get_fpu(X86EMUL_FPU_fpu, &fic); \ 2.54 asm volatile ( \ 2.55 "movb $2f-1f,%0 \n" \ 2.56 "1: " _op " %1 \n" \ 2.57 "2: \n" \ 2.58 : "=m" (fic.insn_bytes), "=m" (_arg) \ 2.59 : : "memory" ); \ 2.60 - ops->put_fpu(ctxt); \ 2.61 - generate_exception_if(fic.exn_raised, EXC_MF, -1); \ 2.62 + put_fpu(&fic); \ 2.63 } while (0) 2.64 2.65 #define emulate_fpu_insn_stub(_bytes...) \ 2.66 do{ uint8_t stub[] = { _bytes, 0xc3 }; \ 2.67 struct fpu_insn_ctxt fic = { .insn_bytes = sizeof(stub)-1 }; \ 2.68 - fail_if(ops->get_fpu == NULL); \ 2.69 - ops->get_fpu(fpu_handle_exception, &fic, ctxt); \ 2.70 + get_fpu(X86EMUL_FPU_fpu, &fic); \ 2.71 (*(void(*)(void))stub)(); \ 2.72 - ops->put_fpu(ctxt); \ 2.73 - generate_exception_if(fic.exn_raised, EXC_MF, -1); \ 2.74 + put_fpu(&fic); \ 2.75 } while (0) 2.76 2.77 static unsigned long __get_rep_prefix( 2.78 @@ -3369,6 +3376,44 @@ x86_emulate( 2.79 break; 2.80 } 2.81 2.82 + case 0x6f: /* movq mm/m64,mm */ { 2.83 + uint8_t stub[] = { 0x0f, 0x6f, modrm, 0xc3 }; 2.84 + struct fpu_insn_ctxt fic = { .insn_bytes = sizeof(stub)-1 }; 2.85 + uint64_t val; 2.86 + if ( ea.type == OP_MEM ) 2.87 + { 2.88 + unsigned long lval, hval; 2.89 + if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &lval, 4, ctxt)) || 2.90 + (rc = ops->read(ea.mem.seg, ea.mem.off+4, &hval, 4, ctxt)) ) 2.91 + goto done; 2.92 + val = ((uint64_t)hval << 32) | (uint32_t)lval; 2.93 + stub[2] = modrm & 0x38; /* movq (%eax),%mmN */ 2.94 + } 2.95 + get_fpu(X86EMUL_FPU_mmx, &fic); 2.96 + asm volatile ( "call *%0" : : "r" (stub), "a" (&val) : "memory" ); 2.97 + put_fpu(&fic); 2.98 + break; 2.99 + } 2.100 + 2.101 + case 0x7f: /* movq mm,mm/m64 */ { 2.102 + uint8_t stub[] = { 0x0f, 0x7f, modrm, 0xc3 }; 2.103 + struct fpu_insn_ctxt fic = { .insn_bytes = sizeof(stub)-1 }; 2.104 + uint64_t val; 2.105 + if ( ea.type == OP_MEM ) 2.106 + stub[2] = modrm & 0x38; /* movq %mmN,(%eax) */ 2.107 + get_fpu(X86EMUL_FPU_mmx, &fic); 2.108 + asm volatile ( "call *%0" : : "r" (stub), "a" (&val) : "memory" ); 2.109 + put_fpu(&fic); 2.110 + if ( ea.type == OP_MEM ) 2.111 + { 2.112 + unsigned long lval = (uint32_t)val, hval = (uint32_t)(val >> 32); 2.113 + if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0, lval, 4, ctxt)) || 2.114 + (rc = ops->write(ea.mem.seg, ea.mem.off+4, hval, 4, ctxt)) ) 2.115 + goto done; 2.116 + } 2.117 + break; 2.118 + } 2.119 + 2.120 case 0x80 ... 0x8f: /* jcc (near) */ { 2.121 int rel = (((op_bytes == 2) && !mode_64bit()) 2.122 ? (int32_t)insn_fetch_type(int16_t)
3.1 --- a/xen/arch/x86/x86_emulate/x86_emulate.h Wed Apr 16 16:42:47 2008 +0100 3.2 +++ b/xen/arch/x86/x86_emulate/x86_emulate.h Thu Apr 17 10:46:54 2008 +0100 3.3 @@ -95,6 +95,12 @@ struct segment_register { 3.4 /* (cmpxchg accessor): CMPXCHG failed. Maps to X86EMUL_RETRY in caller. */ 3.5 #define X86EMUL_CMPXCHG_FAILED 3 3.6 3.7 +/* FPU sub-types which may be requested via ->get_fpu(). */ 3.8 +enum x86_emulate_fpu_type { 3.9 + X86EMUL_FPU_fpu, /* Standard FPU coprocessor instruction set */ 3.10 + X86EMUL_FPU_mmx /* MMX instruction set (%mm0-%mm7) */ 3.11 +}; 3.12 + 3.13 /* 3.14 * These operations represent the instruction emulator's interface to memory. 3.15 * 3.16 @@ -347,9 +353,10 @@ struct x86_emulate_ops 3.17 * @exn_callback: On any FPU or SIMD exception, pass control to 3.18 * (*exception_callback)(exception_callback_arg, regs). 3.19 */ 3.20 - void (*get_fpu)( 3.21 + int (*get_fpu)( 3.22 void (*exception_callback)(void *, struct cpu_user_regs *), 3.23 void *exception_callback_arg, 3.24 + enum x86_emulate_fpu_type type, 3.25 struct x86_emulate_ctxt *ctxt); 3.26 3.27 /* put_fpu: Relinquish the FPU. Unhook from FPU/SIMD exception handlers. */