/* Dynamic (run-time adjusted) execution control flags. */
struct vmx_caps __ro_after_init vmx_caps;
-u32 vmx_vmentry_control __read_mostly;
u64 vmx_ept_vpid_cap __read_mostly;
static uint64_t __read_mostly vmx_vmfunc;
struct vmx_caps caps = {};
u64 _vmx_ept_vpid_cap = 0;
u64 _vmx_misc_cap = 0;
- u32 _vmx_vmentry_control;
u64 _vmx_vmfunc = 0;
bool mismatch = false;
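/*
 * No VMEntry controls are strictly required (min = 0); loading of guest
 * PAT, EFER and BNDCFGS on entry is requested opportunistically, subject
 * to what the hardware advertises.
 */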
min = 0;
opt = (VM_ENTRY_LOAD_GUEST_PAT | VM_ENTRY_LOAD_GUEST_EFER |
VM_ENTRY_LOAD_BNDCFGS);
- _vmx_vmentry_control = adjust_vmx_controls(
+ caps.vmentry_control = adjust_vmx_controls(
"VMEntry Control", min, opt, MSR_IA32_VMX_ENTRY_CTLS, &mismatch);
if ( mismatch )
/* First time through. */
vmx_caps = caps;
vmx_ept_vpid_cap = _vmx_ept_vpid_cap;
- vmx_vmentry_control = _vmx_vmentry_control;
vmx_caps.basic_msr = ((uint64_t)vmx_basic_msr_high << 32) |
vmx_basic_msr_low;
vmx_vmfunc = _vmx_vmfunc;
vmx_caps.vmexit_control, caps.vmexit_control);
mismatch |= cap_check(
"VMEntry Control",
- vmx_vmentry_control, _vmx_vmentry_control);
+ vmx_caps.vmentry_control, caps.vmentry_control);
mismatch |= cap_check(
"EPT and VPID Capability",
vmx_ept_vpid_cap, _vmx_ept_vpid_cap);
{
struct domain *d = v->domain;
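/* Local copies of the system-wide control values, tweaked for this vCPU below. */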
uint32_t vmexit_ctl = vmx_caps.vmexit_control;
- u32 vmentry_ctl = vmx_vmentry_control;
+ uint32_t vmentry_ctl = vmx_caps.vmentry_control;
int rc = 0;
vmx_vmcs_enter(v);
* Make sure all dependent features are off as well.
*/
memset(&vmx_caps, 0, sizeof(vmx_caps));
- vmx_vmentry_control = 0;
vmx_ept_vpid_cap = 0;
vmx_vmfunc = 0;
}
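/* VM-Entry control bits (capabilities reported via MSR_IA32_VMX_ENTRY_CTLS). */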
#define VM_ENTRY_LOAD_GUEST_PAT 0x00004000
#define VM_ENTRY_LOAD_GUEST_EFER 0x00008000
#define VM_ENTRY_LOAD_BNDCFGS 0x00010000
-extern u32 vmx_vmentry_control;
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001U
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002U
uint32_t secondary_exec_control;
uint64_t tertiary_exec_control;
uint32_t vmexit_control;
+ uint32_t vmentry_control;
};
extern struct vmx_caps vmx_caps;
(vmx_caps.cpu_based_exec_control & CPU_BASED_MONITOR_TRAP_FLAG))
#define cpu_has_vmx_pat \
(IS_ENABLED(CONFIG_INTEL_VMX) && \
- vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_PAT)
+ (vmx_caps.vmentry_control & VM_ENTRY_LOAD_GUEST_PAT))
#define cpu_has_vmx_efer \
(IS_ENABLED(CONFIG_INTEL_VMX) && \
- vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_EFER)
+ (vmx_caps.vmentry_control & VM_ENTRY_LOAD_GUEST_EFER))
#define cpu_has_vmx_unrestricted_guest \
(IS_ENABLED(CONFIG_INTEL_VMX) && \
(vmx_caps.secondary_exec_control & SECONDARY_EXEC_UNRESTRICTED_GUEST))
#define cpu_has_vmx_mpx \
(IS_ENABLED(CONFIG_INTEL_VMX) && \
(vmx_caps.vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
- (vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
+ (vmx_caps.vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
#define cpu_has_vmx_xsaves \
(IS_ENABLED(CONFIG_INTEL_VMX) && \
(vmx_caps.secondary_exec_control & SECONDARY_EXEC_XSAVES))