* ept paging structures memory type to WB;
* 2) the CPU must support the EPT page-walk length of 4 according to
* Intel SDM 25.2.2.
+ * 3) the CPU must support INVEPT all-context invalidation, because we
+ * will use it as the last resort if other types are not supported.
*
* Or we just don't use EPT.
*/
if ( !(_vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB) ||
- !(_vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) )
+ !(_vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) ||
+ !(_vmx_ept_vpid_cap & VMX_EPT_INVEPT_ALL_CONTEXT) )
_vmx_secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
}
hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0);
- ept_sync_all();
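+ /* ept_sync_all() no longer checks whether EPT is in use, so guard it here. */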
+ if ( cpu_has_vmx_ept )
+ ept_sync_all();
if ( cpu_has_vmx_vpid )
vpid_sync_all();
/* Test-and-test-and-set this CPU in the EPT-is-synced mask. */
if ( !cpu_isset(cpu, d->arch.hvm_domain.vmx.ept_synced) &&
!cpu_test_and_set(cpu, d->arch.hvm_domain.vmx.ept_synced) )
- __invept(1, d->arch.hvm_domain.vmx.ept_control.eptp, 0);
+ __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(d), 0);
}
vmx_restore_guest_msrs(v);
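+/* Flush this domain's EPT mappings on the CPU this runs on. */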
static void __ept_sync_domain(void *info)
{
struct domain *d = info;
- __invept(1, d->arch.hvm_domain.vmx.ept_control.eptp, 0);
+ __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(d), 0);
}
void ept_sync_domain(struct domain *d)
cpumask_t ept_synced;
};
-#define ept_get_wl(d) \
+#define ept_get_wl(d) \
((d)->arch.hvm_domain.vmx.ept_control.ept_wl)
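+/* asr is the address-space root (top-level EPT table frame); eptp is the full EPTP value. */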
+#define ept_get_asr(d) \
+ ((d)->arch.hvm_domain.vmx.ept_control.asr)
+#define ept_get_eptp(d) \
+ ((d)->arch.hvm_domain.vmx.ept_control.eptp)
struct arch_vmx_struct {
/* Virtual address of VMCS. */
#define VMX_EPT_MEMORY_TYPE_WB 0x00004000
#define VMX_EPT_SUPERPAGE_2MB 0x00010000
#define VMX_EPT_SUPERPAGE_1GB 0x00020000
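+/* INVEPT capabilities reported in the IA32_VMX_EPT_VPID_CAP MSR. */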
+#define VMX_EPT_INVEPT_INSTRUCTION 0x00100000
+#define VMX_EPT_INVEPT_SINGLE_CONTEXT 0x02000000
+#define VMX_EPT_INVEPT_ALL_CONTEXT 0x04000000
#define cpu_has_wbinvd_exiting \
(vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
u64 epte;
} ept_entry_t;
-#define EPT_TABLE_ORDER 9
+#define EPT_TABLE_ORDER 9
#define EPTE_SUPER_PAGE_MASK 0x80
-#define EPTE_MFN_MASK 0x1fffffffffff000
+#define EPTE_MFN_MASK 0xffffffffff000ULL
#define EPTE_AVAIL1_MASK 0xF00
#define EPTE_EMT_MASK 0x38
#define EPTE_IGMT_MASK 0x40
(vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_1GB)
#define cpu_has_vmx_ept_2mb \
(vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB)
+#define cpu_has_vmx_ept_invept_single_context \
+ (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT)
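+/* INVEPT invalidation types (Intel SDM): 1 = single-context, 2 = all-context. */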
+#define INVEPT_SINGLE_CONTEXT 1
+#define INVEPT_ALL_CONTEXT 2
static inline void __vmptrld(u64 addr)
{
__vmwrite(field, __vmread(field) & ~(1UL << bit));
}
-static inline void __invept(int ext, u64 eptp, u64 gpa)
+static inline void __invept(int type, u64 eptp, u64 gpa)
{
struct {
u64 eptp, gpa;
} operand = {eptp, gpa};
+ /*
+ * If single-context invalidation is not supported, fall back to
+ * all-context invalidation.
+ */
+ if ( (type == INVEPT_SINGLE_CONTEXT) &&
+ !cpu_has_vmx_ept_invept_single_context )
+ type = INVEPT_ALL_CONTEXT;
+
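+ /* Descriptor address is passed in %eax (memory operand), the invalidation type in %ecx. */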
asm volatile ( INVEPT_OPCODE
MODRM_EAX_08
/* CF==1 or ZF==1 --> crash (ud2) */
"ja 1f ; ud2 ; 1:\n"
:
- : "a" (&operand), "c" (ext)
+ : "a" (&operand), "c" (type)
: "memory" );
}
static inline void ept_sync_all(void)
{
- if ( !current->domain->arch.hvm_domain.hap_enabled )
- return;
-
- __invept(2, 0, 0);
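+ /* Callers are now responsible for checking that EPT is in use. */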
+ __invept(INVEPT_ALL_CONTEXT, 0, 0);
}
void ept_sync_domain(struct domain *d);