x86/shadow: fix race condition sampling the dirty vram state
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Tue, 23 Sep 2014 12:31:47 +0000 (14:31 +0200)
committer  Jan Beulich <jbeulich@suse.com>
           Tue, 23 Sep 2014 12:31:47 +0000 (14:31 +0200)
d->arch.hvm_domain.dirty_vram must be read with the domain's paging lock held.

If not, two concurrent hypercalls could both end up attempting to free
dirty_vram (the second of which will free a wild pointer), or both end up
allocating a new dirty_vram structure (the first of which will be leaked).

This is XSA-104.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
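
The race can be seen in the pre-patch code, which sampled the pointer before
taking any lock.  The following is a simplified sketch (not the literal Xen
code; xfree() stands in for the real teardown path) of how two concurrent
shadow_track_dirty_vram() hypercalls could interleave:

    /* Both CPUs execute this unlocked read and observe the same pointer: */
    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;

    paging_lock(d);                   /* CPU0 takes the paging lock first */
    xfree(dirty_vram);                /* CPU0 frees the structure ...     */
    d->arch.hvm_domain.dirty_vram = NULL;
    paging_unlock(d);

    paging_lock(d);                   /* ... then CPU1 proceeds and frees */
    xfree(dirty_vram);                /*     the same, now-wild pointer.  */
    d->arch.hvm_domain.dirty_vram = NULL;
    paging_unlock(d);

The allocation path leaks in the same way: both hypercalls see a NULL pointer
before locking, both allocate, and the second store to the field overwrites
the structure allocated by the first, which is then never freed.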
xen/arch/x86/mm/shadow/common.c
xen/include/asm-x86/hvm/domain.h

index 3c803b619013ca0830dfb43c05712b9247b1e7fe..9115a785ce35b6d5b9ccae91b567081b02582876 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3485,7 +3485,7 @@ int shadow_track_dirty_vram(struct domain *d,
     int flush_tlb = 0;
     unsigned long i;
     p2m_type_t t;
-    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+    struct sh_dirty_vram *dirty_vram;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
@@ -3495,6 +3495,8 @@ int shadow_track_dirty_vram(struct domain *d,
     p2m_lock(p2m_get_hostp2m(d));
     paging_lock(d);
 
+    dirty_vram = d->arch.hvm_domain.dirty_vram;
+
     if ( dirty_vram && (!nr ||
              ( begin_pfn != dirty_vram->begin_pfn
             || end_pfn   != dirty_vram->end_pfn )) )
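
After this hunk, the pointer is only sampled once the paging lock is held,
matching the rule documented by the comment change in the second file.  A
minimal sketch of the discipline any reader or writer of the field is
expected to follow (hypothetical caller, simplified):

    paging_lock(d);
    dirty_vram = d->arch.hvm_domain.dirty_vram;   /* sample under the lock */
    /* ... inspect, free, or reallocate dirty_vram here ... */
    paging_unlock(d);
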
index 30d4aa3d857bfa10220e40b737800709fe4f30d7..2757c7fedd4fa8a0c8f7b0d7b0c5f0cc2bc6cbd1 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -112,7 +112,7 @@ struct hvm_domain {
     /* Memory ranges with pinned cache attributes. */
     struct list_head       pinned_cacheattr_ranges;
 
-    /* VRAM dirty support. */
+    /* VRAM dirty support.  Protect with the domain paging lock. */
     struct sh_dirty_vram *dirty_vram;
 
     /* If one of vcpus of this domain is in no_fill_mode or