x86/mm: fix checks against max_mapped_pfn
author     Jan Beulich <JBeulich@suse.com>
           Thu, 3 Apr 2014 07:47:28 +0000 (08:47 +0100)
committer  Tim Deegan <tim@xen.org>
           Thu, 3 Apr 2014 11:08:43 +0000 (12:08 +0100)
This value is an inclusive one, i.e. this fixes an off-by-one in memory
sharing and an off-by-two in shadow code.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
xen/arch/x86/mm/mem_sharing.c
xen/arch/x86/mm/shadow/common.c

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 7ed6594f91042088d2b3e3632254adf5d69fd235..237d3466924171adc7ae885c1e095dca2e5b5451 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1267,8 +1267,8 @@ int relinquish_shared_pages(struct domain *d)
         return 0;
 
     p2m_lock(p2m);
-    for (gfn = p2m->next_shared_gfn_to_relinquish; 
-         gfn < p2m->max_mapped_pfn; gfn++ )
+    for ( gfn = p2m->next_shared_gfn_to_relinquish;
+          gfn <= p2m->max_mapped_pfn; gfn++ )
     {
         p2m_access_t a;
         p2m_type_t t;
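
max_mapped_pfn records the highest mapped frame number, so it is an inclusive bound. A minimal illustrative sketch (standalone, not Xen code; the constant is chosen arbitrarily) of why the loop above must compare with "<=" rather than "<":

#include <stdio.h>

int main(void)
{
    unsigned long max_mapped_pfn = 3;   /* highest valid frame number (inclusive) */
    unsigned long gfn;

    /* With "gfn < max_mapped_pfn" the loop would stop at 2 and never
     * visit the last mapped frame -- the off-by-one fixed above. */
    for ( gfn = 0; gfn <= max_mapped_pfn; gfn++ )
        printf("visiting gfn %lu\n", gfn);  /* visits 0, 1, 2, 3 */

    return 0;
}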
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 517b5f178e5251812d98f50d2feee5f9ad12628f..9258d2aa7f44e7b5f31cb7c1adb7c1122de50edc 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3487,9 +3487,7 @@ int shadow_track_dirty_vram(struct domain *d,
     struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
-    if (end_pfn < begin_pfn
-            || begin_pfn > p2m->max_mapped_pfn
-            || end_pfn >= p2m->max_mapped_pfn)
+    if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
         return -EINVAL;
 
     /* We perform p2m lookups, so lock the p2m upfront to avoid deadlock */
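
The corrected check treats end_pfn as exclusive: a range ending at the last mapped frame has end_pfn == max_mapped_pfn + 1 and is now accepted, whereas the old condition also rejected end_pfn == max_mapped_pfn, hence the off-by-two noted in the commit message. A minimal illustrative sketch of the corrected bound check (standalone, not Xen code; range_ok() and the sample values are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Validate an end-exclusive [begin_pfn, end_pfn) range against an
 * inclusive maximum, mirroring the corrected condition above. */
static bool range_ok(unsigned long begin_pfn, unsigned long end_pfn,
                     unsigned long max_mapped_pfn)
{
    return !(end_pfn < begin_pfn || end_pfn > max_mapped_pfn + 1);
}

int main(void)
{
    unsigned long max = 3;

    printf("[0,3) ok: %d\n", range_ok(0, 3, max));  /* 1: inside the map      */
    printf("[0,4) ok: %d\n", range_ok(0, 4, max));  /* 1: ends at max + 1     */
    printf("[0,5) ok: %d\n", range_ok(0, 5, max));  /* 0: past the last frame */
    return 0;
}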