static uint64_t osvw_length, osvw_status;
static DEFINE_SPINLOCK(osvw_lock);

+/*
+ * Crash the guest only if the problem originates in kernel mode; if the
+ * fault was raised from user mode, inject #UD for the guest kernel to
+ * handle instead.
+ */
+static void svm_crash_or_fault(struct vcpu *v)
+{
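+    /* SVM maintains the guest CPL directly in the VMCB save area. */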
+    if ( vmcb_get_cpl(v->arch.hvm_svm.vmcb) )
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+    else
+        domain_crash(v->domain);
+}
+
void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
{
    struct vcpu *curr = current;

    if ( unlikely(inst_len > 15) )
    {
        gdprintk(XENLOG_ERR, "Bad instruction length %u\n", inst_len);
-        domain_crash(curr->domain);
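+        /* x86 instruction encodings are at most 15 bytes long. */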
+        svm_crash_or_fault(curr);
        return;
    }
"exitinfo1 = %#"PRIx64", exitinfo2 = %#"PRIx64"\n",
exit_reason,
(u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
-        if ( vmcb_get_cpl(vmcb) )
-            hvm_inject_hw_exception(TRAP_invalid_op,
-                                    HVM_DELIVER_NO_ERROR_CODE);
-        else
-            domain_crash(v->domain);
+        svm_crash_or_fault(v);
        break;
    }
    passive_domain_destroy(v);
}

+/*
+ * Crash the guest only if the problem originates in kernel mode; if the
+ * fault was raised from user mode, inject #UD for the guest kernel to
+ * handle instead.
+ */
+static void vmx_crash_or_fault(struct vcpu *v)
+{
+    struct segment_register ss;
+
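+    /* The VMCS has no CPL field; architecturally, SS.DPL equals the CPL. */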
+    vmx_get_segment_register(v, x86_seg_ss, &ss);
+    if ( ss.attr.fields.dpl )
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+    else
+        domain_crash(v->domain);
+}
+
static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
static const u32 msr_index[] =
    vmcs_dump_vcpu(curr);
    printk("**************************************\n");

-    domain_crash(curr->domain);
+    vmx_crash_or_fault(curr);
}
void vmx_enter_realmode(struct cpu_user_regs *regs)
        /* fall through */
    default:
    exit_and_crash:
-    {
-        struct segment_register ss;
-
-        gdprintk(XENLOG_WARNING, "Bad vmexit (reason %#lx)\n",
-                 exit_reason);
-
-        vmx_get_segment_register(v, x86_seg_ss, &ss);
-        if ( ss.attr.fields.dpl )
-            hvm_inject_hw_exception(TRAP_invalid_op,
-                                    HVM_DELIVER_NO_ERROR_CODE);
-        else
-            domain_crash(v->domain);
-    }
+        gdprintk(XENLOG_WARNING, "Bad vmexit (reason %#lx)\n", exit_reason);
+        vmx_crash_or_fault(v);
        break;
    }