drm: make sure all pages in vmalloc area are really DMA-ready
author Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Fri, 2 Oct 2009 18:16:09 +0000 (11:16 -0700)
committer Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Fri, 2 Oct 2009 18:33:09 +0000 (11:33 -0700)
Under Xen, vmalloc_32() isn't guaranteed to return pages that are really
under 4G in machine physical addresses; it only guarantees this in the
guest's pseudo-physical address space.  To work around this, implement a
vmalloc variant which allocates each page with dma_alloc_coherent() to
guarantee that each page is suitable for the device in question.

(This is playing rather fast-and-loose with dma_alloc_coherent() by
assuming that vfree() will correctly release the pages.  That happens
to be OK for Xen, but will quite likely break if dma_alloc_coherent()
is drawing memory from another type of IOMMU...  So I don't think this
alone is the correct solution.)
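
For illustration only (not part of this patch): a symmetric teardown
would have to release each page through dma_free_coherent() instead of
trusting vfree().  A hypothetical helper, assuming the allocator had
kept each page's dma_addr_t in a handles[] array rather than discarding
it, might look like this:

	/* Hypothetical counterpart to drm_vmalloc_dma(); assumes the
	 * per-page dma handles were retained at allocation time. */
	static void drm_vfree_dma(struct drm_device *drmdev, void *addr,
				  struct page **pages, dma_addr_t *handles,
				  int npages)
	{
		struct device *dev = &drmdev->pdev->dev;
		int i;

		vunmap(addr);	/* tear down the mapping, keep the pages */

		for (i = 0; i < npages; i++)
			dma_free_coherent(dev, PAGE_SIZE,
					  page_address(pages[i]), handles[i]);
	}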

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
drivers/gpu/drm/drm_scatter.c

index c7823c863d4f35fb85b0fc5056b19cd08da4b336..8d4437fe446907be31fc214ae4b51f6e33391df8 100644 (file)
  */
 
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
 #include "drmP.h"
 
 #define DEBUG_SCATTER 0
 
-static inline void *drm_vmalloc_dma(unsigned long size)
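+/*
+ * Allocate @size bytes of virtually contiguous memory whose backing
+ * pages are each individually DMA-reachable by @drmdev's device.
+ */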
+static void *drm_vmalloc_dma(struct drm_device *drmdev, unsigned long size)
 {
 #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
 #else
-       return vmalloc_32(size);
+       struct device *dev = &drmdev->pdev->dev;
+       struct page **pages;
+       void *addr;
+       const int npages = PFN_UP(size);
+       int i;
+
+       pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               goto fail;
+
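+       /*
+        * Allocate every backing page through the DMA API so that its
+        * machine address is valid for the device.  The dma handle is
+        * discarded and recomputed with virt_to_bus() on the error
+        * path; that is part of the fast-and-loose assumption noted
+        * in the commit message.
+        */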
+       for (i = 0; i < npages; i++) {
+               dma_addr_t phys;
+               void *va;
+
+               va = dma_alloc_coherent(dev, PAGE_SIZE, &phys, GFP_KERNEL);
+               if (va == NULL)
+                       goto out_free_pages;
+
+               pages[i] = virt_to_page(va);
+       }
+
+       addr = vmap(pages, npages, VM_MAP | VM_IOREMAP, PAGE_KERNEL);
+       if (addr == NULL)
+               goto out_free_pages;    /* i == npages: unwind every page */
+
+       kfree(pages);
+
+       return addr;
+
+out_free_pages:
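+       /* Unwind: free the i pages allocated before the failure
+        * (or all npages if the vmap() itself failed). */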
+       while (i > 0) {
+               void *va = page_address(pages[--i]);
+
+               dma_free_coherent(dev, PAGE_SIZE, va, virt_to_bus(va));
+       }
+
+       kfree(pages);
+
+fail:
+       return NULL;
 #endif
 }
 
@@ -107,7 +143,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
        }
        memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
 
-       entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+       entry->virtual = drm_vmalloc_dma(dev, pages << PAGE_SHIFT);
        if (!entry->virtual) {
                kfree(entry->busaddr);
                kfree(entry->pagelist);