    unsigned int *edx,
    struct x86_emulate_ctxt *ctxt)
{
+    /*
+     * x86_emulate uses this function to query CPU features for its own
+     * internal use. Make sure we're actually emulating CPUID before
+     * emulating CPUID faulting.
+     */
+    if ( ctxt->opcode == X86EMUL_OPC(0x0f, 0xa2) &&
+         hvm_check_cpuid_faulting(current) )
+    {
+        struct hvm_emulate_ctxt *hvmemul_ctxt =
+            container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
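+        /* Queue up a #GP(0); the caller of the emulator performs the injection. */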
+        hvmemul_ctxt->exn_pending = 1;
+        hvmemul_ctxt->trap.vector = TRAP_gp_fault;
+        hvmemul_ctxt->trap.type = X86_EVENTTYPE_HW_EXCEPTION;
+        hvmemul_ctxt->trap.error_code = 0;
+        hvmemul_ctxt->trap.insn_len = 0;
+
+        return X86EMUL_EXCEPTION;
+    }
+
    hvm_funcs.cpuid_intercept(eax, ebx, ecx, edx);
    return X86EMUL_OKAY;
}
}
}
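+/*
+ * Should a CPUID-faulting #GP be raised?  Faulting must have been enabled
+ * by the guest, and only applies to CPUID executed at CPL > 0.
+ */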
+bool hvm_check_cpuid_faulting(struct vcpu *v)
+{
+    struct segment_register sreg;
+
+    if ( !v->arch.cpuid_faulting )
+        return false;
+
+    hvm_get_segment_register(v, x86_seg_ss, &sreg);
+    if ( sreg.attr.fields.dpl == 0 )
+        return false;
+
+    return true;
+}
+
static uint64_t _hvm_rdtsc_intercept(void)
{
    struct vcpu *curr = current;
    unsigned int eax, ebx, ecx, edx;
    unsigned int leaf, subleaf;
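+    /* CPUID faulting: raise #GP(0) rather than emulating the leaf. */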
+    if ( hvm_check_cpuid_faulting(current) )
+    {
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        return 1; /* Don't advance the guest IP! */
+    }
+
    eax = regs->eax;
    ebx = regs->ebx;
    ecx = regs->ecx;
        break;
    case MSR_INTEL_PLATFORM_INFO:
-        if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, *msr_content) )
-            goto gp_fault;
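+        /* CPUID faulting is fully emulated, so always advertise it. */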
+        *msr_content = MSR_PLATFORM_INFO_CPUID_FAULTING;
+        break;
+
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
        *msr_content = 0;
+        if ( current->arch.cpuid_faulting )
+            *msr_content |= MSR_MISC_FEATURES_CPUID_FAULTING;
        break;
    default:
        goto gp_fault;
        break;
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
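+        /* All bits other than the CPUID faulting enable are reserved. */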
+        if ( msr_content & ~MSR_MISC_FEATURES_CPUID_FAULTING )
+            goto gp_fault;
+        v->arch.cpuid_faulting =
+            !!(msr_content & MSR_MISC_FEATURES_CPUID_FAULTING);
+        break;
+
    default:
        if ( passive_domain_do_wrmsr(msr, msr_content) )
            return X86EMUL_OKAY;
    if ( memcmp(instr, "\xf\xa2", sizeof(instr)) )
        return 0;
+
+    /* If CPUID faulting is enabled and CPL > 0, inject #GP in place of #UD. */
+    if ( current->arch.cpuid_faulting && !guest_kernel_mode(current, regs) )
+    {
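+        /* Point the guest at the cpuid instruction when delivering the #GP. */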
+        regs->eip = eip;
+        do_guest_trap(TRAP_gp_fault, regs);
+        return EXCRET_fault_fixed;
+    }
+
    eip += sizeof(instr);
    pv_cpuid(regs);
             rdmsr_safe(MSR_INTEL_PLATFORM_INFO, *val) )
            break;
        *val = 0;
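+        /* PV guests see CPUID faulting only when the hardware provides it. */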
+        if ( this_cpu(cpuid_faulting_enabled) )
+            *val |= MSR_PLATFORM_INFO_CPUID_FAULTING;
+        return X86EMUL_OKAY;
+
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+             rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, *val) )
+            break;
+        *val = 0;
+        if ( curr->arch.cpuid_faulting )
+            *val |= MSR_MISC_FEATURES_CPUID_FAULTING;
        return X86EMUL_OKAY;
    case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
            break;
        return X86EMUL_OKAY;
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+             (val & ~MSR_MISC_FEATURES_CPUID_FAULTING) ||
+             rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, temp) )
+            break;
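+        /* Refuse to enable CPUID faulting if the hardware can't support it. */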
+        if ( (val & MSR_MISC_FEATURES_CPUID_FAULTING) &&
+             !this_cpu(cpuid_faulting_enabled) )
+            break;
+        curr->arch.cpuid_faulting = !!(val & MSR_MISC_FEATURES_CPUID_FAULTING);
+        return X86EMUL_OKAY;
+
    case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
    case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
        break;
    case 0xa2: /* CPUID */
+        /* If CPUID faulting is enabled and CPL > 0, leave the #GP untouched. */
+        if ( v->arch.cpuid_faulting && !guest_kernel_mode(v, regs) )
+            goto fail;
+
        pv_cpuid(regs);
        break;
     * and thus should be saved/restored. */
    bool_t nonlazy_xstate_used;
+    /* Has the guest enabled CPUID faulting? */
+    bool cpuid_faulting;
+
    /*
     * The SMAP check policy when updating runstate_guest(v) and the
     * secondary system time.
               uint32_t *ecx, uint32_t *edx);
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx);
+bool hvm_check_cpuid_faulting(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);