x86: use paging_mark_pfn_dirty()
Author:     Jan Beulich <jbeulich@suse.com>
AuthorDate: Tue, 13 Feb 2018 16:29:50 +0000 (17:29 +0100)
Commit:     Jan Beulich <jbeulich@suse.com>
CommitDate: Tue, 13 Feb 2018 16:29:50 +0000 (17:29 +0100)
... in preference to paging_mark_dirty(), when the PFN is known
anyway.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
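
For context, paging_mark_dirty() is the MFN-based entry point: the log-dirty
bitmap is indexed by guest PFN, so that wrapper has to translate its argument
back through the M2P table before it can set a bit, a lookup that callers
already holding the PFN can skip by calling paging_mark_pfn_dirty() directly.
A simplified sketch of the wrapper as it looks in xen/arch/x86/mm/paging.c
around this commit (illustrative, not verbatim):

    void paging_mark_dirty(struct domain *d, mfn_t gmfn)
    {
        pfn_t pfn;

        if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
             page_get_owner(mfn_to_page(gmfn)) != d )
            return;

        /* The M2P lookup that the call sites below can now avoid. */
        pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));

        paging_mark_pfn_dirty(d, pfn);
    }

Each hunk below accordingly replaces paging_mark_dirty(d, mfn) with
paging_mark_pfn_dirty(d, pfn) at call sites where the PFN is already in hand.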
xen/arch/x86/hvm/dm.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/ioreq.c
xen/arch/x86/mm.c
xen/arch/x86/mm/p2m-pod.c

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 8083ded467f2a5543f47228b6116aaca48353928..a96d5eb8fc8f4e3af5ed8399299ac64707cff0c2 100644
@@ -220,14 +220,12 @@ static int modified_memory(struct domain *d,
             page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
             if ( page )
             {
-                mfn_t gmfn = _mfn(page_to_mfn(page));
-
-                paging_mark_dirty(d, gmfn);
+                paging_mark_pfn_dirty(d, _pfn(pfn));
                 /*
                  * These are most probably not page tables any more
                  * don't take a long time and don't die either.
                  */
-                sh_remove_shadows(d, gmfn, 1, 0);
+                sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
                 put_page(page);
             }
         }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 18d721d10d43f5f5a9b1b8031d0c2f2e58fe946b..91bc3e8b27eef4820f6d016233e432a6650e6be5 100644
@@ -1897,7 +1897,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          */
         if ( npfec.write_access )
         {
-            paging_mark_dirty(currd, mfn);
+            paging_mark_pfn_dirty(currd, _pfn(gfn));
             /*
              * If p2m is really an altp2m, unlock here to avoid lock ordering
              * violation when the change below is propagated from host p2m.
@@ -2582,7 +2582,7 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
         if ( unlikely(p2m_is_discard_write(p2mt)) )
             *writable = 0;
         else if ( !permanent )
-            paging_mark_dirty(d, _mfn(page_to_mfn(page)));
+            paging_mark_pfn_dirty(d, _pfn(gfn));
     }
 
     if ( !permanent )
@@ -3245,7 +3245,7 @@ static enum hvm_translation_result __hvm_copy(
                     memcpy(p, buf, count);
                 else
                     memset(p, 0, count);
-                paging_mark_dirty(v->domain, _mfn(page_to_mfn(page)));
+                paging_mark_pfn_dirty(v->domain, _pfn(gfn_x(gfn)));
             }
         }
         else
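
Note that the hvm.c hunks wrap the frame number in two different ways
(_pfn(gfn) vs. _pfn(gfn_x(gfn))). This is because Xen wraps frame numbers in
distinct structure types (gfn_t, pfn_t, mfn_t) to keep the different
frame-number spaces from being mixed up: a raw unsigned long is wrapped
directly with _pfn(), while a typed gfn_t must first be unwrapped with
gfn_x(). A stand-alone sketch of the idiom (Xen generates the real types with
its TYPE_SAFE() macro; the definitions below are illustrative only):

    #include <stdio.h>

    /* Illustrative stand-ins for Xen's TYPE_SAFE()-generated types. */
    typedef struct { unsigned long pfn; } pfn_t;
    typedef struct { unsigned long gfn; } gfn_t;

    static pfn_t _pfn(unsigned long n) { return (pfn_t){ n }; }
    static gfn_t _gfn(unsigned long n) { return (gfn_t){ n }; }
    static unsigned long gfn_x(gfn_t g) { return g.gfn; }

    int main(void)
    {
        unsigned long raw = 0x1234;   /* like the plain gfn in hvm_hap_nested_page_fault() */
        gfn_t typed = _gfn(0x1234);   /* like the gfn_t argument of __hvm_copy() */

        pfn_t a = _pfn(raw);          /* raw value: wrap directly */
        pfn_t b = _pfn(gfn_x(typed)); /* typed value: unwrap, then re-wrap */

        printf("%#lx %#lx\n", a.pfn, b.pfn);
        return 0;
    }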
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 5aeaaaccd933674fead0eb8b68b5371b7820d9c0..7e66965bcd5f6d98f93e309c6fbc52a2952e6694 100644
@@ -283,7 +283,7 @@ static int hvm_add_ioreq_gfn(
     rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
-        paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
+        paging_mark_pfn_dirty(d, _pfn(iorp->gfn));
 
     return rc;
 }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 86942c726533989618c4fbb8b00dafe23611a10e..e1f089be1465c9cd8d614c226b4304d484f8e28e 100644
@@ -3775,8 +3775,7 @@ long do_mmu_update(
             }
 
             set_gpfn_from_mfn(mfn, gpfn);
-
-            paging_mark_dirty(pg_owner, _mfn(mfn));
+            paging_mark_pfn_dirty(pg_owner, _pfn(gpfn));
 
             put_page(page);
             break;
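
The do_mmu_update() hunk also removes a subtle round trip: the old code wrote
the M2P entry with set_gpfn_from_mfn() and then had paging_mark_dirty()
immediately read that same entry back via get_gpfn_from_mfn(). A hedged
before/after sketch (names as in the hunk, surrounding code elided):

    /* Before: write M2P[mfn] = gpfn, then let the wrapper read it back. */
    set_gpfn_from_mfn(mfn, gpfn);
    paging_mark_dirty(pg_owner, _mfn(mfn));      /* internally re-derives gpfn */

    /* After: pass the gpfn that is already in hand. */
    set_gpfn_from_mfn(mfn, gpfn);
    paging_mark_pfn_dirty(pg_owner, _pfn(gpfn));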
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index b5814214d7852bb059834159b100ce7dd5c43873..fa13e07f7c6d2cd0d8b7a596af481982c706f6a6 100644
@@ -1215,7 +1215,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
     for( i = 0; i < (1UL << order); i++ )
     {
         set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn_aligned) + i);
-        paging_mark_dirty(d, mfn_add(mfn, i));
+        paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn_aligned) + i));
     }
 
     p2m->pod.entry_count -= (1UL << order);