x86/p2m: pass old PTE directly to write_p2m_entry_pre() hook
author    Jan Beulich <jbeulich@suse.com>
          Fri, 8 Jan 2021 15:50:11 +0000 (16:50 +0100)
committer Jan Beulich <jbeulich@suse.com>
          Fri, 8 Jan 2021 15:50:11 +0000 (16:50 +0100)
In no case is a pointer to a non-const entry needed. Since the sole user
of the hook does no pointer arithmetic either, passing in the PTE itself
by value is quite fine.
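
For context: on x86, Xen's l1_pgentry_t is a one-word struct wrapping the
raw PTE bits, so a by-value parameter copies a single machine word. The
following self-contained sketch (hypothetical hook names; the stand-in
type merely mimics Xen's real definition) illustrates what the switch
away from a pointer buys:

    #include <stdint.h>

    /* Stand-in for Xen's l1_pgentry_t: a one-word wrapper struct. */
    typedef struct { uint64_t l1; } l1_pgentry_t;

    /* By-pointer: the callee could, in principle, write through the
     * pointer and modify the live p2m entry. */
    static void hook_by_pointer(l1_pgentry_t *p)
    {
        p->l1 |= 1;          /* nothing prevents this */
    }

    /* By-value: the callee only ever sees a copy of the old entry,
     * so the caller's PTE provably stays untouched. */
    static void hook_by_value(l1_pgentry_t old)
    {
        old.l1 |= 1;         /* modifies the local copy only */
    }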

While making this adjustment, also
- drop the intermediate sh_write_p2m_entry_pre():
  sh_unshadow_for_p2m_change() can itself serve as the hook function,
  with the guarding conditional moved into it (see the sketch below),
- introduce a local variable holding the flags of the old entry.
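
For reference, a condensed sketch of the resulting wiring on the shadow
side (function and field names as in the patch; bodies elided and
Xen-internal types assumed, so this is not compilable on its own):

    /* The former sh_write_p2m_entry_pre() wrapper is gone; its teardown
     * guard now sits at the top of the hook function itself. */
    static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
                                           l1_pgentry_t old, l1_pgentry_t new,
                                           unsigned int level)
    {
        unsigned int oflags = l1e_get_flags(old);  /* old flags, read once */

        if ( unlikely(!d->arch.paging.shadow.total_pages) )
            return;  /* shadow_teardown() already ran; not safe to proceed */

        /* ... unshadowing logic, now testing oflags throughout ... */
    }

    void shadow_p2m_init(struct p2m_domain *p2m)
    {
        p2m->write_p2m_entry_pre  = sh_unshadow_for_p2m_change;
        p2m->write_p2m_entry_post = sh_write_p2m_entry_post;
    }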

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
xen/arch/x86/mm/p2m-pt.c
xen/arch/x86/mm/shadow/common.c
xen/include/asm-x86/p2m.h

diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 848773e1cde34170c57fa2519880bbb93d4cff2a..c43d5d0413a12045fd32adaf9d3b550a4dded993 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -135,7 +135,7 @@ static int write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
         paging_lock(d);
 
         if ( p2m->write_p2m_entry_pre && gfn != gfn_x(INVALID_GFN) )
-            p2m->write_p2m_entry_pre(d, gfn, p, new, level);
+            p2m->write_p2m_entry_pre(d, gfn, *p, new, level);
 
         oflags = l1e_get_flags(*p);
         omfn = l1e_get_mfn(*p);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 3298711972ad631c61ee95a5b75d56e8641a607a..e2ed049f14f51b4ad4345c8c63165ea230c7e6ee 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3086,19 +3086,28 @@ static int shadow_test_disable(struct domain *d)
  */
 
 static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
-                                       l1_pgentry_t *p, l1_pgentry_t new,
+                                       l1_pgentry_t old, l1_pgentry_t new,
                                        unsigned int level)
 {
+    unsigned int oflags = l1e_get_flags(old);
+
+    /*
+     * If there are any shadows, update them.  But if shadow_teardown()
+     * has already been called then it's not safe to try.
+     */
+    if ( unlikely(!d->arch.paging.shadow.total_pages) )
+        return;
+
     /* The following assertion is to make sure we don't step on 1GB host
      * page support of HVM guest. */
-    ASSERT(!(level > 2 && (l1e_get_flags(*p) & _PAGE_PRESENT) &&
-             (l1e_get_flags(*p) & _PAGE_PSE)));
+    ASSERT(!(level > 2 && (oflags & _PAGE_PRESENT) && (oflags & _PAGE_PSE)));
 
     /* If we're removing an MFN from the p2m, remove it from the shadows too */
     if ( level == 1 )
     {
-        mfn_t mfn = l1e_get_mfn(*p);
-        p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
+        mfn_t mfn = l1e_get_mfn(old);
+        p2m_type_t p2mt = p2m_flags_to_type(oflags);
+
         if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
         {
             sh_remove_all_shadows_and_parents(d, mfn);
@@ -3110,15 +3119,15 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
     /* If we're removing a superpage mapping from the p2m, we need to check
      * all the pages covered by it.  If they're still there in the new
      * scheme, that's OK, but otherwise they must be unshadowed. */
-    if ( level == 2 && (l1e_get_flags(*p) & _PAGE_PRESENT) &&
-         (l1e_get_flags(*p) & _PAGE_PSE) )
+    if ( level == 2 && (oflags & _PAGE_PRESENT) && (oflags & _PAGE_PSE) )
     {
         unsigned int i;
         cpumask_t flushmask;
-        mfn_t omfn = l1e_get_mfn(*p);
+        mfn_t omfn = l1e_get_mfn(old);
         mfn_t nmfn = l1e_get_mfn(new);
         l1_pgentry_t *npte = NULL;
-        p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
+        p2m_type_t p2mt = p2m_flags_to_type(oflags);
+
         if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
         {
             cpumask_clear(&flushmask);
@@ -3152,16 +3161,6 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
     }
 }
 
-static void
-sh_write_p2m_entry_pre(struct domain *d, unsigned long gfn, l1_pgentry_t *p,
-                       l1_pgentry_t new, unsigned int level)
-{
-    /* If there are any shadows, update them.  But if shadow_teardown()
-     * has already been called then it's not safe to try. */
-    if ( likely(d->arch.paging.shadow.total_pages != 0) )
-         sh_unshadow_for_p2m_change(d, gfn, p, new, level);
-}
-
 #if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
 static void
 sh_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
@@ -3186,7 +3185,7 @@ sh_write_p2m_entry_post(struct p2m_domain *p2m, unsigned int oflags)
 
 void shadow_p2m_init(struct p2m_domain *p2m)
 {
-    p2m->write_p2m_entry_pre  = sh_write_p2m_entry_pre;
+    p2m->write_p2m_entry_pre  = sh_unshadow_for_p2m_change;
     p2m->write_p2m_entry_post = sh_write_p2m_entry_post;
 }
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6447696bcd45059e2904d1cefc54a49d49818e02..6acbd1e3901c584daa1e19b7e0e6217dbbb981d0 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -274,7 +274,7 @@ struct p2m_domain {
     void               (*memory_type_changed)(struct p2m_domain *p2m);
     void               (*write_p2m_entry_pre)(struct domain *d,
                                               unsigned long gfn,
-                                              l1_pgentry_t *p,
+                                              l1_pgentry_t old,
                                               l1_pgentry_t new,
                                               unsigned int level);
     void               (*write_p2m_entry_post)(struct p2m_domain *p2m,