* First pass:
* - Perform max_leaf/subleaf calculations. Out-of-range leaves return
* all zeros, following the AMD model.
+ * - Fill in *res for leaves no longer handled on the legacy path.
* - Dispatch the virtualised leaves to their respective handlers.
*/
switch ( leaf )
case 0x7:
if ( subleaf > p->feat.max_subleaf )
return;
+
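+ /*
+ * subleaf has already been bounded by max_subleaf above; the BUG_ON()
+ * asserts that the policy's max_subleaf itself never exceeds the raw[]
+ * array.
+ */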
+ BUG_ON(subleaf >= ARRAY_SIZE(p->feat.raw));
+ *res = p->feat.raw[subleaf];
break;
case XSTATE_CPUID:
if ( subleaf >= ARRAY_SIZE(p->xstate.raw) )
return;
- break;
+
+ /* Fallthrough. */
+ default:
+ goto legacy;
}
break;
case 0x80000000 ... 0x80000000 + CPUID_GUEST_NR_EXTD - 1:
if ( leaf > p->extd.max_leaf )
return;
- break;
+ goto legacy;
default:
return;
}
+ /*
+ * Skip dynamic adjustments if we are in the wrong context.
+ *
+ * All dynamic adjustments depend on current register state, which will
+ * be stale if the vcpu is running elsewhere. It is simpler, quicker, and
+ * more reliable for the caller to do nothing (consistently) than to hand
+ * back stale data which it can't use safely.
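+ *
+ * Note that leaves already filled from the policy during the first pass
+ * are still handed back; it is only the dynamic bits (e.g. OSPKE below)
+ * which go unadjusted.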
+ */
+ if ( v != current )
+ return;
+
+ /*
+ * Second pass:
+ * - Dynamic adjustments
+ */
+ switch ( leaf )
+ {
+ case 0x7:
+ switch ( subleaf )
+ {
+ case 0:
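+ /*
+ * OSPKE is the one dynamically-calculated bit in leaf 7: it reflects
+ * CR4.PKE rather than anything in the static policy.
+ */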
+ /* OSPKE clear in policy. Fast-forward CR4 back in. */
+ if ( (is_pv_domain(d)
+ ? v->arch.pv_vcpu.ctrlreg[4]
+ : v->arch.hvm_vcpu.guest_cr[4]) & X86_CR4_PKE )
+ res->c |= cpufeat_mask(X86_FEATURE_OSPKE);
+ break;
+ }
+ break;
+ }
+
+ /* Done. */
+ return;
+
+ legacy:
/* {hvm,pv}_cpuid() have this expectation. */
ASSERT(v == current);
break;
- case 0x7:
- if ( count == 0 )
- {
- *ebx = p->feat._7b0;
- *ecx = p->feat._7c0;
- *edx = p->feat._7d0;
-
- /* OSPKE clear in policy. Fast-forward CR4 back in. */
- if ( v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE )
- *ecx |= cpufeat_mask(X86_FEATURE_OSPKE);
- }
- break;
-
case 0xb:
/* Fix the x2APIC identifier. */
*edx = v->vcpu_id * 2;
else
*eax = 0;
break;
+
+ case 0x7:
+ ASSERT_UNREACHABLE();
+ /* Now handled in guest_cpuid(). */
}
}
}
break;
- case 0x00000007:
- if ( subleaf == 0 )
- {
- b = p->feat._7b0;
- c = p->feat._7c0;
- d = p->feat._7d0;
-
- if ( !is_pvh_domain(currd) )
- {
- /*
- * Delete the PVH condition when HVMLite formally replaces PVH,
- * and HVM guests no longer enter a PV codepath.
- */
-
- /* OSPKE clear in policy. Fast-forward CR4 back in. */
- if ( curr->arch.pv_vcpu.ctrlreg[4] & X86_CR4_PKE )
- c |= cpufeat_mask(X86_FEATURE_OSPKE);
- }
- }
- else
- b = c = d = 0;
- a = 0;
- break;
-
case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
!vpmu_enabled(curr) )
unsupported:
a = b = c = d = 0;
break;
+
+ case 0x7:
+ ASSERT_UNREACHABLE();
+ /* Now handled in guest_cpuid(). */
}
regs->rax = a;
* - {xcr0,xss}_{high,low}
*
* - Guest accurate:
+ * - All of the feat union
* - max_{,sub}leaf
* - All FEATURESET_* words
*
* Per-domain objects:
*
* - Guest accurate:
+ * - All of the feat union
* - max_{,sub}leaf
* - All FEATURESET_* words
*