xenbits.xensource.com Git - xen.git/commitdiff
x86: don't store possibly stale TLB flush time stamp
authorJan Beulich <jbeulich@suse.com>
Thu, 12 Oct 2017 13:06:55 +0000 (15:06 +0200)
committerJan Beulich <jbeulich@suse.com>
Thu, 12 Oct 2017 13:06:55 +0000 (15:06 +0200)
While the timing window is extremely narrow, it is theoretically
possible for an update to the TLB flush clock and a subsequent flush
IPI to happen between the read and write parts of the update of the
per-page stamp. Exclude this possibility by disabling interrupts
across the update, preventing the IPI to be serviced in the middle.

This is XSA-241.

Reported-by: Jann Horn <jannh@google.com>
Suggested-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
master commit: 23a183607a427572185fc51c76cc5ab11c00c4cc
master date: 2017-10-12 14:48:25 +0200

xen/arch/arm/smp.c
xen/arch/x86/mm.c
xen/arch/x86/mm/shadow/common.c
xen/common/page_alloc.c
xen/include/asm-arm/flushtlb.h
xen/include/asm-x86/flushtlb.h

index e7df0874d6f32facc04fb8094abe72fc28a6d94c..4d6274d7a694c316d86a82288e4ef9a7609fc744 100644 (file)
@@ -1,3 +1,4 @@
+#include <xen/mm.h>
 #include <asm/system.h>
 #include <asm/smp.h>
 #include <asm/cpregs.h>
index 45419dc22f4c1e1634b6d441a8e9be2af6a39420..4b3c2a1198eeb890cc5513304d03b1d47333cdb8 100644 (file)
@@ -2524,7 +2524,7 @@ static int _put_final_page_type(struct page_info *page, unsigned long type,
          */
         if ( !(shadow_mode_enabled(page_get_owner(page)) &&
                (page->count_info & PGC_page_table)) )
-            page->tlbflush_timestamp = tlbflush_current_time();
+            page_set_tlbflush_timestamp(page);
         wmb();
         page->u.inuse.type_info--;
     }
@@ -2534,7 +2534,7 @@ static int _put_final_page_type(struct page_info *page, unsigned long type,
                 (PGT_count_mask|PGT_validated|PGT_partial)) == 1);
         if ( !(shadow_mode_enabled(page_get_owner(page)) &&
                (page->count_info & PGC_page_table)) )
-            page->tlbflush_timestamp = tlbflush_current_time();
+            page_set_tlbflush_timestamp(page);
         wmb();
         page->u.inuse.type_info |= PGT_validated;
     }
@@ -2588,7 +2588,7 @@ static int _put_page_type(struct page_info *page, bool preemptible,
             if ( ptpg && PGT_type_equal(x, ptpg->u.inuse.type_info) )
             {
                 /*
-                 * set_tlbflush_timestamp() accesses the same union
+                 * page_set_tlbflush_timestamp() accesses the same union
                  * linear_pt_count lives in. Unvalidated page table pages,
                  * however, should occur during domain destruction only
                  * anyway.  Updating of linear_pt_count luckily is not
@@ -2609,7 +2609,7 @@ static int _put_page_type(struct page_info *page, bool preemptible,
              */
             if ( !(shadow_mode_enabled(page_get_owner(page)) &&
                    (page->count_info & PGC_page_table)) )
-                page->tlbflush_timestamp = tlbflush_current_time();
+                page_set_tlbflush_timestamp(page);
         }
 
         if ( likely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) == x) )
index 2e64a774d85de0d0dbda28344decec66ac217d20..bfc4c796aa5b9579dbb28aaf937b17155d55e9f9 100644 (file)
@@ -1464,7 +1464,7 @@ void shadow_free(struct domain *d, mfn_t smfn)
          * TLBs when we reuse the page.  Because the destructors leave the
          * contents of the pages in place, we can delay TLB flushes until
          * just before the allocator hands the page out again. */
-        sp->tlbflush_timestamp = tlbflush_current_time();
+        page_set_tlbflush_timestamp(sp);
         perfc_decr(shadow_alloc_count);
         page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
         sp = next;
index 6225551cb213b8b137b89e803eade6eac4c30581..0bc5f291fd241ecd5c686c495c81132384a03c3b 100644 (file)
@@ -964,7 +964,7 @@ static void free_heap_pages(
         /* If a page has no owner it will need no safety TLB flush. */
         pg[i].u.free.need_tlbflush = (page_get_owner(&pg[i]) != NULL);
         if ( pg[i].u.free.need_tlbflush )
-            pg[i].tlbflush_timestamp = tlbflush_current_time();
+            page_set_tlbflush_timestamp(&pg[i]);
 
         /* This page is not a guest frame any more. */
         page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
index a8e8a0536344b8d311c184ca90bdee486e92d2c2..83ff9fa8b355da19ad8f9b0cd55b8fdf7d2e0c1b 100644 (file)
@@ -12,6 +12,11 @@ static inline void tlbflush_filter(cpumask_t *mask, uint32_t page_timestamp) {}
 
 #define tlbflush_current_time()                 (0)
 
+static inline void page_set_tlbflush_timestamp(struct page_info *page)
+{
+    page->tlbflush_timestamp = tlbflush_current_time();
+}
+
 #if defined(CONFIG_ARM_32)
 # include <asm/arm32/flushtlb.h>
 #elif defined(CONFIG_ARM_64)
index 8b7adef7c59f3532c0e50f546c049849fa7e00ea..5f78bbbe3d4a31a626dc98ebde4616a680bc1c0c 100644 (file)
@@ -23,6 +23,20 @@ DECLARE_PER_CPU(u32, tlbflush_time);
 
 #define tlbflush_current_time() tlbflush_clock
 
+static inline void page_set_tlbflush_timestamp(struct page_info *page)
+{
+    /*
+     * Prevent storing a stale time stamp, which could happen if an update
+     * to tlbflush_clock plus a subsequent flush IPI happen between the
+     * reading of tlbflush_clock and the writing of the struct page_info
+     * field.
+     */
+    ASSERT(local_irq_is_enabled());
+    local_irq_disable();
+    page->tlbflush_timestamp = tlbflush_current_time();
+    local_irq_enable();
+}
+
 /*
  * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
  * @lastuse_stamp is a timestamp taken when the PFN we are testing was last