guest_l4e_t *l4p;
#endif
uint32_t gflags, mflags, iflags, rc = 0;
- int smep;
+ bool_t smep = 0, smap = 0;
bool_t pse1G = 0, pse2M = 0;
p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
mflags = mandatory_flags(v, pfec);
iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
- /* SMEP: kernel-mode instruction fetches from user-mode mappings
- * should fault. Unlike NX or invalid bits, we're looking for _all_
- * entries in the walk to have _PAGE_USER set, so we need to do the
- * whole walk as if it were a user-mode one and then invert the answer. */
- smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v)
- && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
- if ( smep )
+ if ( is_hvm_vcpu(v) && !(pfec & PFEC_user_mode) )
+ {
+ struct segment_register seg;
+ const struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+ hvm_get_segment_register(v, x86_seg_ss, &seg);
+
+ /* SMEP: kernel-mode instruction fetches from user-mode mappings
+ * should fault. Unlike NX or invalid bits, we're looking for _all_
+ * entries in the walk to have _PAGE_USER set, so we need to do the
+ * whole walk as if it were a user-mode one and then invert the answer. */
+ smep = hvm_smep_enabled(v) && (pfec & PFEC_insn_fetch);
+
+ /*
+ * SMAP: kernel-mode data accesses from user-mode mappings should fault.
+ * A fault is treated as a SMAP violation when all of the following
+ * conditions are met:
+ * - X86_CR4_SMAP is set in CR4
+ * - A user page is accessed
+ * - CPL = 3 or X86_EFLAGS_AC is clear
+ * - The page fault occurred in kernel mode
+ */
+ smap = hvm_smap_enabled(v) &&
+ ((seg.attr.fields.dpl == 3) || !(regs->eflags & X86_EFLAGS_AC));
+ }
+
+ if ( smep || smap )
mflags |= _PAGE_USER;
#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
set_ad:
#endif
- /* Now re-invert the user-mode requirement for SMEP. */
- if ( smep )
+ /* Now re-invert the user-mode requirement for SMEP and SMAP */
+ if ( smep || smap )
rc ^= _PAGE_USER;
/* Go back and set accessed and dirty bits only if the walk was a
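For reference (this sketch is not part of the patch): the hunk above leans on
the walker's existing mandatory-flags accounting, in which each level records
any mandatory flag that is missing (roughly rc |= (gflags & mflags) ^ mflags).
With _PAGE_USER added to mflags, rc ends up *without* _PAGE_USER only when
every level of the walk was a user mapping, so XOR-ing _PAGE_USER back in at
the end reports a fault exactly in the SMEP/SMAP case. A minimal standalone
sketch of that inversion, using a hypothetical model_walk() helper and an
illustrative 0x4 stand-in for _PAGE_USER:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MODEL_PAGE_USER 0x4u   /* illustrative stand-in for _PAGE_USER */

    /* Returns a non-zero "rc" iff the modelled access should fault. */
    static uint32_t model_walk(const uint32_t *gflags, size_t levels,
                               bool smep_or_smap)
    {
        uint32_t mflags = 0, rc = 0;
        size_t i;

        if ( smep_or_smap )
            mflags |= MODEL_PAGE_USER;           /* demand user bit at every level */

        for ( i = 0; i < levels; i++ )
            rc |= (gflags[i] & mflags) ^ mflags; /* collect missing mandatory bits */

        if ( smep_or_smap )
            rc ^= MODEL_PAGE_USER;               /* invert: fault only if all levels were user */

        return rc;
    }

With every level user-accessible, rc comes back as MODEL_PAGE_USER (a
SMEP/SMAP violation); as soon as one level is supervisor-only, the bit
collected during the loop is cleared by the final XOR and no fault is reported
for it.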
(hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
#define hvm_smep_enabled(v) \
(hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
+#define hvm_smap_enabled(v) \
+ (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
#define hvm_nx_enabled(v) \
(!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
static inline bool_t hvm_vcpu_has_smep(void)
{
- unsigned int eax, ebx;
+ unsigned int eax, ebx, ecx = 0;
hvm_cpuid(0, &eax, NULL, NULL, NULL);
- if (eax < 7)
+ if ( eax < 7 )
return 0;
- hvm_cpuid(7, NULL, &ebx, NULL, NULL);
+ hvm_cpuid(7, NULL, &ebx, &ecx, NULL);
return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
}
+static inline bool_t hvm_vcpu_has_smap(void)
+{
+ unsigned int eax, ebx, ecx = 0;
+
+ hvm_cpuid(0, &eax, NULL, NULL, NULL);
+
+ if ( eax < 7 )
+ return 0;
+
+ hvm_cpuid(7, NULL, &ebx, &ecx, NULL);
+ return !!(ebx & cpufeat_mask(X86_FEATURE_SMAP));
+}
+
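For reference (again not part of the patch): CPUID leaf 7 is sub-leaf indexed,
so ECX must be 0 on input to select sub-leaf 0, which is presumably why the
feature probes now initialise ecx and hand it to hvm_cpuid(); SMEP and SMAP
are both reported in EBX of CPUID.(EAX=07H, ECX=0). A host-side sketch of the
same SMAP detection, assuming GCC's <cpuid.h> helper __get_cpuid_count()
instead of Xen's hvm_cpuid():

    #include <cpuid.h>
    #include <stdbool.h>

    static bool host_has_smap(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid_count() returns 0 when leaf 7 is not supported. */
        if ( !__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) )
            return false;

        return (ebx >> 20) & 1;    /* CPUID.(EAX=07H,ECX=0):EBX.SMAP[bit 20] */
    }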
/* These bits in CR4 cannot be set by the guest. */
#define HVM_CR4_GUEST_RESERVED_BITS(_v) \
(~((unsigned long) \
(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | \
X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE | \
X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
(hvm_vcpu_has_smep() ? X86_CR4_SMEP : 0) | \
+ (hvm_vcpu_has_smap() ? X86_CR4_SMAP : 0) | \
(cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) | \
((nestedhvm_enabled((_v)->domain) && cpu_has_vmx)\
? X86_CR4_VMXE : 0) | \