x86/vmx: Properly flush the TLB when an altp2m is modified
author    Andrew Cooper <andrew.cooper3@citrix.com>
          Mon, 18 Mar 2019 16:02:49 +0000 (17:02 +0100)
committer Jan Beulich <jbeulich@suse.com>
          Mon, 18 Mar 2019 16:02:49 +0000 (17:02 +0100)
Modifications to an altp2m mark the p2m as needing flushing, but this was
never wired up in the return-to-guest path.  As a result, stale TLB entries
can remain after resuming the guest.
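For context, the "needing flushing" state is the per-pCPU invalidate mask in
struct ept_data: the modification side sets a bit for each pCPU which may hold
stale translations, and the vmenter path is expected to test-and-clear its own
bit and issue INVEPT.  Below is a minimal, self-contained sketch of that
handshake; mark_needs_flush(), consume_needs_flush() and the bool array are
illustrative stand-ins only, not Xen code:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for struct ept_data and its per-pCPU invalidate mask. */
struct ept_sketch {
    unsigned long eptp;
    bool invalidate[NR_CPUS];   /* the real code uses a cpumask */
};

/* Producer: a p2m/altp2m modification marks every pCPU as stale. */
static void mark_needs_flush(struct ept_sketch *ept)
{
    for ( unsigned int cpu = 0; cpu < NR_CPUS; ++cpu )
        ept->invalidate[cpu] = true;
}

/* Consumer: the return-to-guest path test-and-clears its own bit. */
static bool consume_needs_flush(struct ept_sketch *ept, unsigned int cpu)
{
    if ( !ept->invalidate[cpu] )
        return false;
    ept->invalidate[cpu] = false;
    return true;   /* caller must now INVEPT this context */
}

int main(void)
{
    struct ept_sketch ept = { .eptp = 0x1000 };

    mark_needs_flush(&ept);               /* altp2m permission change */
    if ( consume_needs_flush(&ept, 0) )   /* vmenter on pCPU 0 */
        printf("INVEPT single context, eptp=%#lx\n", ept.eptp);
    if ( !consume_needs_flush(&ept, 0) )  /* second vmenter: already clean */
        printf("no flush needed\n");

    return 0;
}

The bug fixed below is that the consumer side only ever looked at the host
p2m's mask, so bits set on an altp2m were never consumed.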

In practice, this manifests as a missing EPT_VIOLATION or #VE exception when
the guest subsequently accesses a page which has had its permissions reduced.

vmx_vmenter_helper() now has 11 p2ms to potentially invalidate, but issuing 11
INVEPT instructions isn't clever.  Instead, count how many contexts need
invalidating, and use INVEPT_ALL_CONTEXT if two or more are in need of
flushing.
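To make the heuristic concrete, here is a small, self-contained sketch of the
same None => Single => All decision, written against stand-in data rather than
Xen's p2m structures (struct context, needs_flush and flush_stale_contexts()
are illustrative names only):

#include <stdbool.h>
#include <stdio.h>

#define NR_CONTEXTS 11   /* host p2m plus up to 10 altp2ms */

struct context {
    unsigned long eptp;
    bool needs_flush;
};

/*
 * Walk all EPT contexts, counting how many need invalidating.  Issue one
 * INVEPT_SINGLE_CONTEXT if exactly one does, or a single INVEPT_ALL_CONTEXT
 * if two or more do - never a string of per-context INVEPTs.
 */
static void flush_stale_contexts(struct context *ctx, unsigned int nr)
{
    unsigned int inv = 0;            /* None => Single => All */
    struct context *single = NULL;   /* valid iff inv == 1 */

    for ( unsigned int i = 0; i < nr; ++i )
    {
        if ( !ctx[i].needs_flush )
            continue;
        ctx[i].needs_flush = false;
        inv++;
        single = &ctx[i];
    }

    if ( inv == 1 )
        printf("INVEPT_SINGLE_CONTEXT eptp=%#lx\n", single->eptp);
    else if ( inv >= 2 )
        printf("INVEPT_ALL_CONTEXT\n");
}

int main(void)
{
    struct context ctx[NR_CONTEXTS] = {
        [0] = { .eptp = 0x1000, .needs_flush = true },
        [3] = { .eptp = 0x4000, .needs_flush = true },
    };

    flush_stale_contexts(ctx, NR_CONTEXTS);   /* two stale => all-context */
    return 0;
}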

This doesn't have an XSA because altp2m is not yet a security-supported
feature.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
master commit: 69f7643df68ef8e994221a996e336a47cbb7bbc8
master date: 2019-02-28 11:16:27 +0000

xen/arch/x86/hvm/vmx/vmx.c

index f9e5d74d1964eae3af8b106556401835b95ce9a6..2c7d194cd28e110b353d7f9194bfb666c7479aa5 100644
@@ -4257,6 +4257,7 @@ static void lbr_fixup(void)
 bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
+    struct domain *currd = curr->domain;
     u32 new_asid, old_asid;
     struct hvm_vcpu_asid *p_asid;
     bool_t need_flush;
@@ -4303,17 +4304,42 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
 
     if ( paging_mode_hap(curr->domain) )
     {
-        struct ept_data *ept = &p2m_get_hostp2m(curr->domain)->ept;
+        struct ept_data *ept = &p2m_get_hostp2m(currd)->ept;
         unsigned int cpu = smp_processor_id();
+        unsigned int inv = 0; /* None => Single => All */
+        struct ept_data *single = NULL; /* Single eptp, iff inv == 1 */
 
         if ( cpumask_test_cpu(cpu, ept->invalidate) )
         {
             cpumask_clear_cpu(cpu, ept->invalidate);
-            if ( nestedhvm_enabled(curr->domain) )
-                __invept(INVEPT_ALL_CONTEXT, 0);
-            else
-                __invept(INVEPT_SINGLE_CONTEXT, ept->eptp);
+
+            /* Automatically invalidate all contexts if nested. */
+            inv += 1 + nestedhvm_enabled(currd);
+            single = ept;
+        }
+
+        if ( altp2m_active(currd) )
+        {
+            unsigned int i;
+
+            for ( i = 0; i < MAX_ALTP2M; ++i )
+            {
+                if ( currd->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+                    continue;
+
+                ept = &currd->arch.altp2m_p2m[i]->ept;
+                if ( cpumask_test_cpu(cpu, ept->invalidate) )
+                {
+                    cpumask_clear_cpu(cpu, ept->invalidate);
+                    inv++;
+                    single = ept;
+                }
+            }
         }
+
+        if ( inv )
+            __invept(inv == 1 ? INVEPT_SINGLE_CONTEXT : INVEPT_ALL_CONTEXT,
+                     inv == 1 ? single->eptp          : 0);
     }
 
  out: