xenbits.xensource.com Git - people/pauldu/linux.git/commitdiff
more stuff pv-iommu10
author Paul Durrant <paul.durrant@citrix.com>
Thu, 26 Jul 2018 15:35:54 +0000 (16:35 +0100)
committer Paul Durrant <paul.durrant@citrix.com>
Fri, 27 Jul 2018 15:48:25 +0000 (16:48 +0100)
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
drivers/xen/pv-iommu-xen.c

index defe4c3bc89b3d6dcc14b2ba2da1c54c0d3ad5d1..e6658b3f8a3aa68d10b6dcb99e95ad5d4da09f78 100644
@@ -54,6 +54,83 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
        return xen_phys_to_bus(virt_to_phys(address));
 }
 
+static int xen_pv_iommu_op(struct xen_iommu_op *op)
+{
+       struct xen_iommu_op_buf buf;
+       int rc;
+
+       set_xen_guest_handle(buf.h, op);
+       buf.size = sizeof(*op);
+
+       rc = HYPERVISOR_iommu_op(1, &buf);
+       if (rc)
+               return rc;
+
+       return op->status;
+}
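
For reference, a minimal sketch of the interface this wrapper relies on. The real definitions live in the Xen public header for this series (not part of this diff), so the field names and widths below are assumptions inferred from how the code uses them; HYPERVISOR_iommu_op() is taken to accept a buffer count and an array of op buffers, which is why the single op is wrapped in one xen_iommu_op_buf here.

struct xen_iommu_op_buf {
        XEN_GUEST_HANDLE(void) h;       /* guest pointer to one xen_iommu_op */
        uint64_t size;                  /* sizeof(struct xen_iommu_op) */
};

struct xen_iommu_op {
        uint16_t op;                    /* XEN_IOMMUOP_* opcode */
        uint16_t pad;
        int32_t status;                 /* written back by Xen on completion */
        union {
                struct xen_iommu_op_map map;     /* domid, flags, bfn, u.gfn */
                struct xen_iommu_op_unmap unmap; /* domid, bfn, u.gfn */
        } u;
};
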
+
+static void flush(void)
+{
+       struct xen_iommu_op op = {
+               .op = XEN_IOMMUOP_flush,
+       };
+       int rc;
+
+       rc = xen_pv_iommu_op(&op);
+       BUG_ON(rc);
+}
+
+static void map_bus(dma_addr_t dev_addr, size_t size,
+                   enum dma_data_direction dir)
+{
+       dma_addr_t end = dev_addr + size - 1;
+       struct xen_iommu_op op = {
+               .op = XEN_IOMMUOP_map,
+               .u.map.domid = DOMID_SELF,
+       };
+
+       if (dir == DMA_TO_DEVICE)
+               op.u.map.flags |= XEN_IOMMUOP_map_readonly;
+
+       while (dev_addr <= end) {
+               int rc;
+
+               op.u.map.bfn = XEN_PFN_DOWN(dev_addr);
+               op.u.map.u.gfn = XEN_PFN_DOWN(dev_addr);
+
+               rc = xen_pv_iommu_op(&op);
+               BUG_ON(rc);
+
+               dev_addr += XEN_PAGE_SIZE;
+       }
+
+       flush();
+}
+
+static void unmap_bus(dma_addr_t dev_addr, size_t size)
+{
+       dma_addr_t end = dev_addr + size - 1;
+       struct xen_iommu_op op = {
+               .op = XEN_IOMMUOP_unmap,
+               .u.unmap.domid = DOMID_SELF,
+       };
+
+       while (dev_addr <= end) {
+               int rc;
+
+               op.u.unmap.bfn = XEN_PFN_DOWN(dev_addr);
+               op.u.unmap.u.gfn = XEN_PFN_DOWN(dev_addr);
+
+               rc = xen_pv_iommu_op(&op);
+               if (rc)
+                       pr_err("unmap error %d\n", rc);
+
+               dev_addr += XEN_PAGE_SIZE;
+       }
+
+       flush();
+}
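
Both helpers walk the region one XEN_PAGE_SIZE frame at a time, issue one hypercall per frame with bfn and gfn set to the same frame number (an identity BFN == GFN mapping), and finish with a single flush. A hypothetical caller mapping an 8KiB buffer for device writes would look like this:

void *buf = (void *)__get_free_pages(GFP_KERNEL, 1);   /* two 4KiB pages */
dma_addr_t dev_addr = xen_virt_to_bus(buf);

map_bus(dev_addr, 2 * XEN_PAGE_SIZE, DMA_FROM_DEVICE); /* 2 map ops + 1 flush */
/* ... device DMA into buf ... */
unmap_bus(dev_addr, 2 * XEN_PAGE_SIZE);                /* 2 unmap ops + 1 flush */
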
+
 static int is_xen_pv_iommu_buffer(dma_addr_t dev_addr)
 {
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);
@@ -68,11 +145,10 @@ static int is_xen_pv_iommu_buffer(dma_addr_t dev_addr)
 static int max_dma_bits = 32;
 
 static int
-xen_pv_iommu_fixup(void *buf, size_t size, unsigned long nslabs)
+xen_pv_iommu_fixup(void *buf, unsigned long nslabs)
 {
        int i, rc;
        int dma_bits;
-       dma_addr_t ignored;
        phys_addr_t p = virt_to_phys(buf);
 
        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
@@ -80,32 +156,24 @@ xen_pv_iommu_fixup(void *buf, size_t size, unsigned long nslabs)
        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
+               size_t size = slabs << IO_TLB_SHIFT;
+               dma_addr_t dev_addr;
 
                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
-                               get_order(slabs << IO_TLB_SHIFT),
-                               dma_bits, &ignored);
+                               get_order(size), dma_bits,
+                               &dev_addr);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;
 
+               map_bus(dev_addr, size, DMA_FROM_DEVICE);
                i += slabs;
        } while (i < nslabs);
        return 0;
 }
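
The segment arithmetic, assuming the usual x86 swiotlb constants (IO_TLB_SHIFT = 11, i.e. 2KiB slabs, and IO_TLB_SEGSIZE = 128), works out as follows:

/*
 * segment size     = IO_TLB_SEGSIZE << IO_TLB_SHIFT = 128 * 2KiB = 256KiB
 * initial dma_bits = get_order(256KiB) + PAGE_SHIFT  = 6 + 12    = 18
 *
 * Each 256KiB segment is made machine-contiguous below 1 << dma_bits,
 * widening the mask on failure up to max_dma_bits (32 by default), and
 * is then identity-mapped through the IOMMU by map_bus() at the machine
 * address returned by xen_create_contiguous_region().
 */
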
 
-static unsigned long xen_set_nslabs(unsigned long nr_tbl)
-{
-       if (!nr_tbl) {
-               xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
-               xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
-       } else
-               xen_io_tlb_nslabs = nr_tbl;
-
-       return xen_io_tlb_nslabs << IO_TLB_SHIFT;
-}
-
 enum xen_pv_iommu_err {
        XEN_PV_IOMMU_UNKNOWN = 0,
        XEN_PV_IOMMU_ENOMEM,
@@ -130,6 +198,15 @@ static const char *xen_pv_iommu_error(enum xen_pv_iommu_err err)
 
 int __init xen_pv_iommu_detect(void)
 {
+       struct xen_iommu_op op = {
+               .op = XEN_IOMMUOP_enable,
+       };
+       int rc;
+
+       rc = xen_pv_iommu_op(&op);
+       if (rc)
+               return 0;
+
        return 1;
 }
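
Detection now doubles as enablement: the function returns 1 only when Xen accepts the XEN_IOMMUOP_enable op. A sketch of the assumed boot-time ordering (both functions exist in this file; how they are hooked into platform init is outside this diff):

if (xen_pv_iommu_detect())              /* XEN_IOMMUOP_enable accepted */
        xen_pv_iommu_early_init();      /* allocate and fix up the IO TLB */
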
 
@@ -142,10 +219,10 @@ void __init xen_pv_iommu_early_init(void)
 
        pr_info("%s: ====>\n", __func__);
 
-       xen_io_tlb_nslabs = swiotlb_nr_tbl();
+       xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
 retry:
-       bytes = xen_set_nslabs(xen_io_tlb_nslabs);
-       order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
+       bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+       order = get_order(bytes);
        /*
         * Get IO TLB memory from any location.
         */
@@ -159,7 +236,6 @@ retry:
         * And replace that memory with pages under 4GB.
         */
        rc = xen_pv_iommu_fixup(xen_io_tlb_start,
-                               bytes,
                                xen_io_tlb_nslabs);
        if (rc) {
                free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
@@ -202,30 +278,29 @@ xen_pv_iommu_alloc_coherent(struct device *hwdev, size_t size,
                           unsigned long attrs)
 {
        void *ret;
-       int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
-       dma_addr_t ignored;
+       dma_addr_t dev_addr;
 
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
        ret = (void*)__get_free_pages(flags, get_order(size));
-       phys = virt_to_phys(ret);
+       BUG_ON(!ret);
 
-       if (!ret)
-               return ret;
+       phys = virt_to_phys(ret);
 
        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;
 
-       if (xen_create_contiguous_region(phys, order,
-                                        fls64(dma_mask), &ignored) != 0) {
-               free_pages((unsigned long)ret, get_order(size));
-               return NULL;
-       }
+       if (xen_create_contiguous_region(phys, get_order(size),
+                                        fls64(dma_mask),
+                                        &dev_addr) != 0)
+               BUG();
+
        memset(ret, 0, size);
+       map_bus(dev_addr, size, DMA_FROM_DEVICE);
 
-       *dma_handle = xen_phys_to_bus(phys);
+       *dma_handle = dev_addr;
        return ret;
 }
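
With this change, *dma_handle is the machine (bus) address returned by xen_create_contiguous_region() rather than a translation of the pseudo-physical address, and the region is IOMMU-mapped before being handed out. A hypothetical caller, assuming the parameter order follows the standard dma_map_ops alloc/free signatures:

dma_addr_t handle;
void *cpu = xen_pv_iommu_alloc_coherent(dev, PAGE_SIZE, &handle,
                                        GFP_KERNEL, 0);
/* program 'handle' into the device; its DMA is translated by the IOMMU */
xen_pv_iommu_free_coherent(dev, PAGE_SIZE, cpu, handle, 0);
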
 
@@ -236,18 +311,19 @@ xen_pv_iommu_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        int order = get_order(size);
        phys_addr_t phys;
 
+       unmap_bus(dev_addr, size);
+
        phys = xen_bus_to_phys(dev_addr);
        xen_destroy_contiguous_region(phys, order);
 
        free_pages((unsigned long)vaddr, get_order(size));
 }
 
-static dma_addr_t xen_pv_iommu_map_page(struct device *dev, struct page *page,
-                                       unsigned long offset, size_t size,
-                                       enum dma_data_direction dir,
-                                       unsigned long attrs)
+static dma_addr_t xen_pv_iommu_map_single(struct device *dev, phys_addr_t phys,
+                                         size_t size, enum dma_data_direction dir,
+                                         unsigned long attrs)
 {
-       phys_addr_t map, phys = page_to_phys(page) + offset;
+       phys_addr_t map;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);
 
        BUG_ON(dir == DMA_NONE);
@@ -256,20 +332,20 @@ static dma_addr_t xen_pv_iommu_map_page(struct device *dev, struct page *page,
 
        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
                                     attrs);
-       if (map == SWIOTLB_MAP_ERROR)
-               return XEN_PV_IOMMU_ERROR_CODE;
+       BUG_ON(map == SWIOTLB_MAP_ERROR);
 
-       /*
-        * Ensure that the address returned is DMA'ble
-        */
        dev_addr = xen_phys_to_bus(map);
-       if (dma_capable(dev, dev_addr, size))
-               return dev_addr;
+       BUG_ON(!dma_capable(dev, dev_addr, size));
 
-       attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-       swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+       return dev_addr;
+}
 
-       return XEN_PV_IOMMU_ERROR_CODE;
+static dma_addr_t xen_pv_iommu_map_page(struct device *dev, struct page *page,
+                                       unsigned long offset, size_t size,
+                                       enum dma_data_direction dir,
+                                       unsigned long attrs)
+{
+       return xen_pv_iommu_map_single(dev, page_to_phys(page) + offset,
+                                      size, dir, attrs);
 }
 
 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
@@ -350,26 +426,10 @@ xen_pv_iommu_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
        BUG_ON(dir == DMA_NONE);
 
        for_each_sg(sgl, sg, nelems, i) {
-               phys_addr_t paddr = sg_phys(sg);
-               dma_addr_t dev_addr = xen_phys_to_bus(paddr);
-               phys_addr_t map = swiotlb_tbl_map_single(hwdev,
-                                                        start_dma_addr,
-                                                        sg_phys(sg),
-                                                        sg->length,
-                                                        dir, attrs);
-
-               if (map == SWIOTLB_MAP_ERROR) {
-                       dev_warn(hwdev, "swiotlb buffer is full\n");
-                       /* Don't panic here, we expect map_sg users
-                          to do proper error handling. */
-                       attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-                       xen_pv_iommu_unmap_sg_attrs(hwdev, sgl, i, dir,
-                                                   attrs);
-                       sg_dma_len(sgl) = 0;
-                       return 0;
-               }
-
-               dev_addr = xen_phys_to_bus(map);
+               dma_addr_t dev_addr = xen_pv_iommu_map_single(hwdev, sg_phys(sg),
+                                                             sg->length,
+                                                             dir, attrs);
+
                sg->dma_address = dev_addr;
                sg_dma_len(sg) = sg->length;
        }