xenbits.xensource.com Git - people/dariof/xen.git/commitdiff
x86/np2m: flush all np2m objects on nested INVEPT
author: Sergey Dyasli <sergey.dyasli@citrix.com>
Tue, 3 Oct 2017 15:20:57 +0000 (16:20 +0100)
committer: Andrew Cooper <andrew.cooper3@citrix.com>
Fri, 6 Oct 2017 12:36:43 +0000 (13:36 +0100)
At the moment, nvmx_handle_invept() updates the current np2m just to
flush it.  Instead introduce a function, np2m_flush_base(), which will
look up the np2m base pointer and call p2m_flush_table() instead.

Unfortunately, since we don't know which p2m a given vcpu is using, we
must flush all p2ms that share that base pointer.

Convert p2m_flush_table() into p2m_flush_table_locked() in order not
to release the p2m_lock after np2m_base check.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
xen/arch/x86/hvm/vmx/vvmx.c
xen/arch/x86/mm/p2m.c
xen/include/asm-x86/p2m.h

index cd0ee0a307576ee2ed3403937fa9a59549ffbfcd..d333aa6d78d8a84800e24df75a819c201c53b391 100644 (file)
@@ -1910,12 +1910,7 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
     {
     case INVEPT_SINGLE_CONTEXT:
     {
-        struct p2m_domain *p2m = p2m_get_nestedp2m(current, eptp);
-        if ( p2m )
-        {
-            p2m_flush(current, p2m);
-            ept_sync_domain(p2m);
-        }
+        np2m_flush_base(current, eptp);
         break;
     }
     case INVEPT_ALL_CONTEXT:
index 16391c2db96a6ed12a0c17344b67238221412cb7..54f42a1c6c3c3dc3c817423426a9b7c96ed15aca 100644 (file)
@@ -1729,15 +1729,14 @@ p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
     return p2m;
 }
 
-/* Reset this p2m table to be empty */
 static void
-p2m_flush_table(struct p2m_domain *p2m)
+p2m_flush_table_locked(struct p2m_domain *p2m)
 {
     struct page_info *top, *pg;
     struct domain *d = p2m->domain;
     mfn_t mfn;
 
-    p2m_lock(p2m);
+    ASSERT(p2m_locked_by_me(p2m));
 
     /*
      * "Host" p2m tables can have shared entries &c that need a bit more care
@@ -1750,10 +1749,7 @@ p2m_flush_table(struct p2m_domain *p2m)
 
     /* No need to flush if it's already empty */
     if ( p2m_is_nestedp2m(p2m) && p2m->np2m_base == P2M_BASE_EADDR )
-    {
-        p2m_unlock(p2m);
         return;
-    }
 
     /* This is no longer a valid nested p2m for any address space */
     p2m->np2m_base = P2M_BASE_EADDR;
@@ -1773,7 +1769,14 @@ p2m_flush_table(struct p2m_domain *p2m)
             d->arch.paging.free_page(d, pg);
     }
     page_list_add(top, &p2m->pages);
+}
 
+/* Reset this p2m table to be empty */
+static void
+p2m_flush_table(struct p2m_domain *p2m)
+{
+    p2m_lock(p2m);
+    p2m_flush_table_locked(p2m);
     p2m_unlock(p2m);
 }
 
@@ -1794,6 +1797,26 @@ p2m_flush_nestedp2m(struct domain *d)
         p2m_flush_table(d->arch.nested_p2m[i]);
 }
 
+void np2m_flush_base(struct vcpu *v, unsigned long np2m_base)
+{
+    struct domain *d = v->domain;
+    struct p2m_domain *p2m;
+    unsigned int i;
+
+    np2m_base &= ~(0xfffull);
+
+    nestedp2m_lock(d);
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+    {
+        p2m = d->arch.nested_p2m[i];
+        p2m_lock(p2m);
+        if ( p2m->np2m_base == np2m_base )
+            p2m_flush_table_locked(p2m);
+        p2m_unlock(p2m);
+    }
+    nestedp2m_unlock(d);
+}
+
 static void assign_np2m(struct vcpu *v, struct p2m_domain *p2m)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
index ec9e716dd50057720f2de880f8c7b955d8077be6..d5c3d01c47618839f81a311f00211456b853325d 100644 (file)
@@ -770,6 +770,8 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa);
 void p2m_flush(struct vcpu *v, struct p2m_domain *p2m);
 /* Flushes all nested p2m tables */
 void p2m_flush_nestedp2m(struct domain *d);
+/* Flushes all np2m objects with the specified np2m_base */
+void np2m_flush_base(struct vcpu *v, unsigned long np2m_base);
 
 void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
     l1_pgentry_t *p, l1_pgentry_t new, unsigned int level);