[INSTR_CPUID] = { X86EMUL_OPC(0x0f, 0xa2) },
};
-int __get_instruction_length_from_list(struct vcpu *v,
- const enum instruction_index *list, unsigned int list_count)
+/*
+ * Early processors with SVM didn't have the NextRIP feature, meaning that
+ * when we take a fault-style VMExit, we have to decode the instruction stream
+ * to calculate how many bytes to move %rip forwards by.
+ *
+ * In debug builds, always compare the hardware-reported instruction length
+ * (if available) with the result from x86_decode_insn().
+ */
+unsigned int svm_get_insn_len(struct vcpu *v, enum instruction_index insn)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
struct hvm_emulate_ctxt ctxt;
struct x86_emulate_state *state;
- unsigned long inst_len, j;
+ unsigned long nrip_len, emul_len;
unsigned int modrm_rm, modrm_reg;
int modrm_mod;
- /*
- * In debug builds, always use x86_decode_insn() and compare with
- * hardware.
- */
-#ifdef NDEBUG
- if ( (inst_len = svm_nextrip_insn_length(v)) > MAX_INST_LEN )
- gprintk(XENLOG_WARNING, "NRip reported inst_len %lu\n", inst_len);
- else if ( inst_len != 0 )
- return inst_len;
+ nrip_len = svm_nextrip_insn_length(v);
- if ( vmcb->exitcode == VMEXIT_IOIO )
- return vmcb->exitinfo2 - vmcb->rip;
+#ifdef NDEBUG
+ if ( nrip_len > MAX_INST_LEN )
+ gprintk(XENLOG_WARNING, "NRip reported inst_len %lu\n", nrip_len);
+ else if ( nrip_len != 0 )
+ return nrip_len;
#endif
ASSERT(v == current);
if ( IS_ERR_OR_NULL(state) )
return 0;
- inst_len = x86_insn_length(state, &ctxt.ctxt);
+ emul_len = x86_insn_length(state, &ctxt.ctxt);
modrm_mod = x86_insn_modrm(state, &modrm_rm, &modrm_reg);
x86_emulate_free_state(state);
+
#ifndef NDEBUG
- if ( vmcb->exitcode == VMEXIT_IOIO )
- j = vmcb->exitinfo2 - vmcb->rip;
- else
- j = svm_nextrip_insn_length(v);
- if ( j && j != inst_len )
+ if ( nrip_len && nrip_len != emul_len )
{
gprintk(XENLOG_WARNING, "insn-len[%02x]=%lu (exp %lu)\n",
- ctxt.ctxt.opcode, inst_len, j);
- return j;
+ ctxt.ctxt.opcode, nrip_len, emul_len);
+ return nrip_len;
}
#endif
- for ( j = 0; j < list_count; j++ )
+ if ( insn >= ARRAY_SIZE(opc_tab) )
{
- unsigned int instr = list[j];
-
- if ( instr >= ARRAY_SIZE(opc_tab) )
- {
- ASSERT_UNREACHABLE();
- break;
- }
- if ( opc_tab[instr].opcode == ctxt.ctxt.opcode )
- {
- if ( !opc_tab[instr].modrm.mod )
- return inst_len;
-
- if ( modrm_mod == opc_tab[instr].modrm.mod &&
- (modrm_rm & 7) == opc_tab[instr].modrm.rm &&
- (modrm_reg & 7) == opc_tab[instr].modrm.reg )
- return inst_len;
- }
+ ASSERT_UNREACHABLE();
+ return 0;
+ }
+
+ if ( opc_tab[insn].opcode == ctxt.ctxt.opcode )
+ {
+ if ( !opc_tab[insn].modrm.mod )
+ return emul_len;
+
+ if ( modrm_mod == opc_tab[insn].modrm.mod &&
+ (modrm_rm & 7) == opc_tab[insn].modrm.rm &&
+ (modrm_reg & 7) == opc_tab[insn].modrm.reg )
+ return emul_len;
}
gdprintk(XENLOG_WARNING,
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- inst_len = __get_instruction_length(v, INSTR_VMRUN);
- if (inst_len == 0) {
+ inst_len = svm_get_insn_len(v, INSTR_VMRUN);
+ if ( inst_len == 0 )
+ {
svm->ns_vmexit.exitcode = VMEXIT_SHUTDOWN;
return -1;
}
return;
}
- if ( (inst_len = __get_instruction_length(v, INSTR_STGI)) == 0 )
+ if ( (inst_len = svm_get_insn_len(v, INSTR_STGI)) == 0 )
return;
nestedsvm_vcpu_stgi(v);
return;
}
- if ( (inst_len = __get_instruction_length(v, INSTR_CLGI)) == 0 )
+ if ( (inst_len = svm_get_insn_len(v, INSTR_CLGI)) == 0 )
return;
nestedsvm_vcpu_clgi(v);
{
struct vcpu *curr = current;
bool rdmsr = curr->arch.hvm.svm.vmcb->exitinfo1 == 0;
- int rc, inst_len = __get_instruction_length(
- curr, rdmsr ? INSTR_RDMSR : INSTR_WRMSR);
+ int rc, inst_len = svm_get_insn_len(curr, rdmsr ? INSTR_RDMSR
+ : INSTR_WRMSR);
if ( inst_len == 0 )
return;
{
unsigned int inst_len;
- if ( (inst_len = __get_instruction_length(current, INSTR_HLT)) == 0 )
+ if ( (inst_len = svm_get_insn_len(current, INSTR_HLT)) == 0 )
return;
__update_guest_eip(regs, inst_len);
{
struct vcpu *curr = current;
const struct domain *currd = curr->domain;
- enum instruction_index insn = rdtscp ? INSTR_RDTSCP : INSTR_RDTSC;
unsigned int inst_len;
if ( rdtscp && !currd->arch.cpuid->extd.rdtscp )
return;
}
- if ( (inst_len = __get_instruction_length(curr, insn)) == 0 )
+ if ( (inst_len = svm_get_insn_len(curr, rdtscp ? INSTR_RDTSCP
+ : INSTR_RDTSC)) == 0 )
return;
__update_guest_eip(regs, inst_len);
{
unsigned int inst_len;
- if ( (inst_len = __get_instruction_length(current, INSTR_PAUSE)) == 0 )
+ if ( (inst_len = svm_get_insn_len(current, INSTR_PAUSE)) == 0 )
return;
__update_guest_eip(regs, inst_len);
unsigned int inst_len;
struct page_info *page;
- if ( (inst_len = __get_instruction_length(v, INSTR_VMLOAD)) == 0 )
+ if ( (inst_len = svm_get_insn_len(v, INSTR_VMLOAD)) == 0 )
return;
if ( !nsvm_efer_svm_enabled(v) )
unsigned int inst_len;
struct page_info *page;
- if ( (inst_len = __get_instruction_length(v, INSTR_VMSAVE)) == 0 )
+ if ( (inst_len = svm_get_insn_len(v, INSTR_VMSAVE)) == 0 )
return;
if ( !nsvm_efer_svm_enabled(v) )
flush_all(FLUSH_CACHE);
}
-static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
+static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs,
+ bool invld)
{
- static const enum instruction_index list[] = { INSTR_INVD, INSTR_WBINVD };
- int inst_len;
+ unsigned int inst_len = svm_get_insn_len(current, invld ? INSTR_INVD
+ : INSTR_WBINVD);
- inst_len = __get_instruction_length_from_list(
- current, list, ARRAY_SIZE(list));
if ( inst_len == 0 )
return;
else
{
trap_type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
- inst_len = __get_instruction_length(v, INSTR_ICEBP);
+ inst_len = svm_get_insn_len(v, INSTR_ICEBP);
}
rc = hvm_monitor_debug(regs->rip,
break;
case VMEXIT_EXCEPTION_BP:
- inst_len = __get_instruction_length(v, INSTR_INT3);
+ inst_len = svm_get_insn_len(v, INSTR_INT3);
if ( inst_len == 0 )
break;
case VMEXIT_INVD:
case VMEXIT_WBINVD:
- svm_vmexit_do_invalidate_cache(regs);
+ svm_vmexit_do_invalidate_cache(regs, exit_reason == VMEXIT_INVD);
break;
case VMEXIT_TASK_SWITCH: {
case VMEXIT_CPUID:
{
- unsigned int inst_len = __get_instruction_length(v, INSTR_CPUID);
+ unsigned int inst_len = svm_get_insn_len(v, INSTR_CPUID);
int rc = 0;
if ( inst_len == 0 )
hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
break;
}
- if ( (inst_len = __get_instruction_length(v, INSTR_INVLPGA)) == 0 )
+ if ( (inst_len = svm_get_insn_len(v, INSTR_INVLPGA)) == 0 )
break;
svm_invlpga_intercept(v, regs->rax, regs->ecx);
__update_guest_eip(regs, inst_len);
break;
case VMEXIT_VMMCALL:
- if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
+ if ( (inst_len = svm_get_insn_len(v, INSTR_VMCALL)) == 0 )
break;
BUG_ON(vcpu_guestmode);
HVMTRACE_1D(VMMCALL, regs->eax);
case VMEXIT_XSETBV:
if ( vmcb_get_cpl(vmcb) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- else if ( (inst_len = __get_instruction_length(v, INSTR_XSETBV)) &&
+ else if ( (inst_len = svm_get_insn_len(v, INSTR_XSETBV)) &&
hvm_handle_xsetbv(regs->ecx, msr_fold(regs)) == X86EMUL_OKAY )
__update_guest_eip(regs, inst_len);
break;
struct vcpu;
-int __get_instruction_length_from_list(
- struct vcpu *, const enum instruction_index *, unsigned int list_count);
-
-static inline int __get_instruction_length(
- struct vcpu *v, enum instruction_index instr)
-{
- return __get_instruction_length_from_list(v, &instr, 1);
-}
+unsigned int svm_get_insn_len(struct vcpu *v, enum instruction_index instr);
#endif /* __ASM_X86_HVM_SVM_EMULATE_H__ */