[LINUX] gnttab: Fix copy_grant_page race with seqlock
author    kfraser <kfraser@localhost.localdomain>
          Thu, 7 Jun 2007 10:04:08 +0000 (11:04 +0100)
committer kfraser <kfraser@localhost.localdomain>
          Thu, 7 Jun 2007 10:04:08 +0000 (11:04 +0100)
Previously gnttab_copy_grant_page would always unmap the grant table
entry, even if DMA operations were still outstanding.  This would
allow a hostile guest to cause a page still in use by DMA to be freed
back to the hypervisor.

This patch fixes this by making sure that we do not free the grant
table entry while a DMA mapping is outstanding.  To achieve this, a
seqlock is used to synchronise the DMA operations with
copy_grant_page.

The DMA operations use the read side of the seqlock, so performance
should be largely unaffected.
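
Schematically, the locking works like the minimal standalone sketch
below (demo_copy and demo_map are hypothetical stand-ins for
gnttab_copy_grant_page and gnttab_dma_map_page, not the patch itself;
page_to_bus and maddr_t are the Xen tree's helpers as used in the
diff):

	#include <linux/mm.h>
	#include <linux/seqlock.h>
	#include <xen/gnttab.h>

	static DEFINE_SEQLOCK(demo_dma_lock);

	/* Writer, modelled on gnttab_copy_grant_page: bump the
	 * sequence, then check for a racing DMA mapping before
	 * doing the replacement. */
	static int demo_copy(struct page *page)
	{
		write_seqlock(&demo_dma_lock);
		smp_mb();	/* make seq visible before the check */
		if (page_mapped(page)) {	/* DMA mapping raced in */
			write_sequnlock(&demo_dma_lock);
			return -EBUSY;
		}
		/* ... safe to swap the page and grant entry here ... */
		write_sequnlock(&demo_dma_lock);
		return 0;
	}

	/* Reader, modelled on gnttab_dma_map_page: mark the page as
	 * DMA-mapped, then retry if a copy ran concurrently. */
	static maddr_t demo_map(struct page *page)
	{
		maddr_t maddr;
		unsigned int seq;

		do {
			seq = read_seqbegin(&demo_dma_lock);
			maddr = page_to_bus(page); /* may have changed */
			atomic_set(&page->_mapcount, 0); /* flag mapping */
			smp_mb();	/* publish _mapcount before retry */
		} while (read_seqretry(&demo_dma_lock, seq));

		return maddr;
	}

Either the writer sees the reader's _mapcount and backs off with
-EBUSY, or the reader sees the bumped sequence and loops, so neither
side can miss the other; the common DMA-mapping path remains a
lock-free read.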

Thanks to Isaku Yamahata for noticing the race condition.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/xen/core/gnttab.c

index 1e015582f04eeab478156418755baa393ba3c001..a4edfdeeabd51fe5b6ec37965139e4f551abf9f3 100644
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/seqlock.h>
 #include <xen/interface/xen.h>
 #include <xen/gnttab.h>
 #include <asm/pgtable.h>
@@ -63,6 +64,8 @@ static struct grant_entry *shared;
 
 static struct gnttab_free_callback *gnttab_free_callback_list;
 
+static DEFINE_SEQLOCK(gnttab_dma_lock);
+
 static int gnttab_expand(unsigned int req_entries);
 
 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
@@ -492,11 +495,6 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 
 static void gnttab_page_free(struct page *page)
 {
-       if (page->mapping) {
-               put_page((struct page *)page->mapping);
-               page->mapping = NULL;
-       }
-
        ClearPageForeign(page);
        gnttab_reset_grant_page(page);
        put_page(page);
@@ -538,16 +536,22 @@ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
        mfn = pfn_to_mfn(pfn);
        new_mfn = virt_to_mfn(new_addr);
 
-       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-               set_phys_to_machine(pfn, new_mfn);
-               set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
+       write_seqlock(&gnttab_dma_lock);
 
-               mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-               mmu.val = pfn;
-               err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
-               BUG_ON(err);
+       /* Make seq visible before checking page_mapped. */
+       smp_mb();
+
+       /* Has the page been DMA-mapped? */
+       if (unlikely(page_mapped(page))) {
+               write_sequnlock(&gnttab_dma_lock);
+               put_page(new_page);
+               err = -EBUSY;
+               goto out;
        }
 
+       if (!xen_feature(XENFEAT_auto_translated_physmap))
+               set_phys_to_machine(pfn, new_mfn);
+
        gnttab_set_replace_op(&unmap, (unsigned long)addr,
                              (unsigned long)new_addr, ref);
 
@@ -556,6 +560,17 @@ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
        BUG_ON(err);
        BUG_ON(unmap.status);
 
+       write_sequnlock(&gnttab_dma_lock);
+
+       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
+
+               mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+               mmu.val = pfn;
+               err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
+               BUG_ON(err);
+       }
+
        new_page->mapping = page->mapping;
        new_page->index = page->index;
        set_bit(PG_foreign, &new_page->flags);
@@ -564,22 +579,9 @@ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
        SetPageForeign(page, gnttab_page_free);
        page->mapping = NULL;
 
-       /*
-        * Ensure that there is a barrier between setting the p2m entry
-        * and checking the map count.  See gnttab_dma_map_page.
-        */
-       smp_mb();
-
-       /* Has the page been DMA-mapped? */
-       if (unlikely(page_mapped(page))) {
-               err = -EBUSY;
-               page->mapping = (void *)new_page;
-       }
-
 out:
        put_page(page);
        return err;
-
 }
 EXPORT_SYMBOL(gnttab_copy_grant_page);
 
@@ -593,23 +595,27 @@ EXPORT_SYMBOL(gnttab_copy_grant_page);
  */
 maddr_t gnttab_dma_map_page(struct page *page)
 {
-       maddr_t mfn = pfn_to_mfn(page_to_pfn(page)), mfn2;
+       maddr_t maddr = page_to_bus(page);
+       unsigned int seq;
 
        if (!PageForeign(page))
-               return mfn << PAGE_SHIFT;
+               return maddr;
 
-       if (mfn_to_local_pfn(mfn) < max_mapnr)
-               return mfn << PAGE_SHIFT;
+       do {
+               seq = read_seqbegin(&gnttab_dma_lock);
+               maddr = page_to_bus(page);
 
-       atomic_set(&page->_mapcount, 0);
+               /* Has it become a local MFN? */
+               if (pfn_valid(mfn_to_local_pfn(maddr >> PAGE_SHIFT)))
+                       break;
 
-       /* This barrier corresponds to the one in gnttab_copy_grant_page. */
-       smp_mb();
+               atomic_set(&page->_mapcount, 0);
 
-       /* Has this page been copied in the mean time? */
-       mfn2 = pfn_to_mfn(page_to_pfn(page));
+               /* Make _mapcount visible before read_seqretry. */
+               smp_mb();
+       } while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
 
-       return mfn2 << PAGE_SHIFT;
+       return maddr;
 }
 
 int gnttab_resume(void)