#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bootmem.h>
#include <linux/slab.h>
+#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
-#include <asm/xen/swiotlb-xen.h>
+#include <asm/xen/page-coherent.h>
+
#include <xen/xen.h>
+#include <xen/xen-ops.h>
#include <trace/events/swiotlb.h>
-struct dma_map_ops xen_pv_iommu_dma_ops = {};
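+/* Bus-address cookie returned for a failed mapping; see mapping_error(). */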
+#define XEN_PV_IOMMU_ERROR_CODE (~(dma_addr_t)0x0)
+
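+/*
+ * Bounds and size of the bounce buffer, plus its bus address; all set up
+ * once in xen_pv_iommu_early_init().
+ */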
+static char *xen_io_tlb_start, *xen_io_tlb_end;
+static unsigned long xen_io_tlb_nslabs;
+
+static u64 start_dma_addr;
+
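+/*
+ * Translate a guest physical address into a bus (machine) address: swap
+ * the guest frame number for the machine frame backing it and keep the
+ * offset within the frame.
+ */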
+static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+{
+ unsigned long mfn = __pfn_to_mfn(XEN_PFN_DOWN(paddr));
+ dma_addr_t dev_addr = (dma_addr_t)mfn << XEN_PAGE_SHIFT;
+
+ dev_addr |= paddr & ~XEN_PAGE_MASK;
+
+ return dev_addr;
+}
+
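+/* The inverse mapping: machine frame back to guest physical frame. */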
+static inline phys_addr_t xen_bus_to_phys(dma_addr_t dev_addr)
+{
+ unsigned long mfn = XEN_PFN_DOWN(dev_addr);
+ phys_addr_t paddr = XEN_PFN_PHYS(mfn_to_pfn(mfn));
+
+ paddr |= dev_addr & ~XEN_PAGE_MASK;
+
+ return paddr;
+}
+
+static inline dma_addr_t xen_virt_to_bus(void *address)
+{
+ return xen_phys_to_bus(virt_to_phys(address));
+}
+
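+/*
+ * Does this bus address fall inside our bounce buffer?  Unmap asserts
+ * that it does; sync uses it to decide whether a bounce copy is needed.
+ */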
+static int is_xen_pv_iommu_buffer(dma_addr_t dev_addr)
+{
+ phys_addr_t paddr = xen_bus_to_phys(dev_addr);
+
+ if (pfn_valid(PFN_DOWN(paddr))) {
+ return paddr >= virt_to_phys(xen_io_tlb_start) &&
+ paddr < virt_to_phys(xen_io_tlb_end);
+ }
+ return 0;
+}
+
+static int max_dma_bits = 32;
+
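+/*
+ * Ask Xen to exchange the buffer's backing pages, IO_TLB_SEGSIZE slabs
+ * at a time, for frames that are machine-contiguous and reachable with
+ * dma_bits address bits, widening the mask up to max_dma_bits on failure.
+ */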
+static int
+xen_pv_iommu_fixup(void *buf, size_t size, unsigned long nslabs)
+{
+ int i, rc;
+ int dma_bits;
+ dma_addr_t ignored;
+ phys_addr_t p = virt_to_phys(buf);
+
+ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+
+ i = 0;
+ do {
+ int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
+
+ do {
+ rc = xen_create_contiguous_region(
+ p + (i << IO_TLB_SHIFT),
+ get_order(slabs << IO_TLB_SHIFT),
+ dma_bits, &ignored);
+ } while (rc && dma_bits++ < max_dma_bits);
+ if (rc)
+ return rc;
+
+ i += slabs;
+ } while (i < nslabs);
+ return 0;
+}
+
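+/* Fall back to a 64MB IO TLB when no slab count was requested. */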
+static unsigned long xen_set_nslabs(unsigned long nr_tbl)
+{
+ if (!nr_tbl) {
+ xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
+ xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+ } else
+ xen_io_tlb_nslabs = nr_tbl;
+
+ return xen_io_tlb_nslabs << IO_TLB_SHIFT;
+}
+
+enum xen_pv_iommu_err {
+ XEN_PV_IOMMU_UNKNOWN = 0,
+ XEN_PV_IOMMU_ENOMEM,
+ XEN_PV_IOMMU_EFIXUP
+};
+
+static const char *xen_pv_iommu_error(enum xen_pv_iommu_err err)
+{
+ switch (err) {
+ case XEN_PV_IOMMU_ENOMEM:
+ return "Cannot allocate Xen-SWIOTLB buffer\n";
+ case XEN_PV_IOMMU_EFIXUP:
+ return "Failed to get contiguous memory for DMA from Xen!\n"\
+ "You either: don't have the permissions, do not have"\
+ " enough free memory under 4GB, or the hypervisor memory"\
+ " is too fragmented!";
+ default:
+ break;
+ }
+ return "";
+}
int __init xen_pv_iommu_detect(void)
{
- xen_swiotlb = 1;
-
- return xen_swiotlb;
+ return 1;
}
void __init xen_pv_iommu_early_init(void)
{
+ unsigned long bytes, order;
+ int rc = -ENOMEM;
+ enum xen_pv_iommu_err m_ret = XEN_PV_IOMMU_UNKNOWN;
+ unsigned int repeat = 3;
+
pr_info("%s: ====>\n", __func__);
- xen_swiotlb_init(1, true);
- xen_pv_iommu_dma_ops = xen_swiotlb_dma_ops;
+ xen_io_tlb_nslabs = swiotlb_nr_tbl();
+retry:
+ bytes = xen_set_nslabs(xen_io_tlb_nslabs);
+ order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
+ /*
+ * Get IO TLB memory from any location.
+ */
+ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+ if (!xen_io_tlb_start) {
+ m_ret = XEN_PV_IOMMU_ENOMEM;
+ goto error;
+ }
+ xen_io_tlb_end = xen_io_tlb_start + bytes;
+ /*
+ * And replace that memory with pages under 4GB.
+ */
+ rc = xen_pv_iommu_fixup(xen_io_tlb_start,
+ bytes,
+ xen_io_tlb_nslabs);
+ if (rc) {
+ free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+ m_ret = XEN_PV_IOMMU_EFIXUP;
+ goto error;
+ }
+ start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
+
+	if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, 1))
+		panic("Cannot allocate SWIOTLB buffer");
+
+	swiotlb_set_max_segment(PAGE_SIZE);
pr_info("%s: <====\n", __func__);
+
+ return;
+error:
+ if (repeat--) {
+ xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
+ (xen_io_tlb_nslabs >> 1));
+ pr_info("Lowering to %luMB\n",
+ (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+ goto retry;
+ }
+	panic("%s (rc:%d)", xen_pv_iommu_error(m_ret), rc);
}
void __init xen_pv_iommu_late_init(void)
{
	pr_info("%s: <===>\n", __func__);
}
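+/*
+ * Coherent allocations bypass the bounce buffer: allocate ordinary guest
+ * pages, then have Xen exchange them for a machine-contiguous region that
+ * fits the device's coherent DMA mask.
+ */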
+static void *
+xen_pv_iommu_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ unsigned long attrs)
+{
+ void *ret;
+ int order = get_order(size);
+ u64 dma_mask = DMA_BIT_MASK(32);
+ phys_addr_t phys;
+ dma_addr_t ignored;
+
+ flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+	ret = (void *)__get_free_pages(flags, order);
+	if (!ret)
+		return NULL;
+	phys = virt_to_phys(ret);
+
+ if (hwdev && hwdev->coherent_dma_mask)
+ dma_mask = hwdev->coherent_dma_mask;
+
+ if (xen_create_contiguous_region(phys, order,
+ fls64(dma_mask), &ignored) != 0) {
+		free_pages((unsigned long)ret, order);
+ return NULL;
+ }
+ memset(ret, 0, size);
+
+ *dma_handle = xen_phys_to_bus(phys);
+ return ret;
+}
+
+static void
+xen_pv_iommu_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dev_addr, unsigned long attrs)
+{
+ int order = get_order(size);
+ phys_addr_t phys;
+
+ phys = xen_bus_to_phys(dev_addr);
+ xen_destroy_contiguous_region(phys, order);
+
+	free_pages((unsigned long)vaddr, order);
+}
+
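+/*
+ * Bounce every streaming mapping through the IO TLB, then verify that the
+ * bounce slot's bus address is reachable by the device; if not, release
+ * the slot and return the error cookie.
+ */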
+static dma_addr_t xen_pv_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t map, phys = page_to_phys(page) + offset;
+ dma_addr_t dev_addr = xen_phys_to_bus(phys);
+
+ BUG_ON(dir == DMA_NONE);
+
+ trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
+ map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
+ attrs);
+ if (map == SWIOTLB_MAP_ERROR)
+ return XEN_PV_IOMMU_ERROR_CODE;
+
+ /*
+ * Ensure that the address returned is DMA'ble
+ */
+ dev_addr = xen_phys_to_bus(map);
+ if (dma_capable(dev, dev_addr, size))
+ return dev_addr;
+
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+ swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+ return XEN_PV_IOMMU_ERROR_CODE;
+}
+
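+/* Release a bounce slot previously set up by map_page() or map_sg(). */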
+static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t paddr = xen_bus_to_phys(dev_addr);
+
+ BUG_ON(dir == DMA_NONE);
+ BUG_ON(!is_xen_pv_iommu_buffer(dev_addr));
+
+ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+}
+
+static void xen_pv_iommu_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
+}
+
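+/*
+ * The ordering is deliberate: arch cache maintenance runs before the
+ * bounce copy when syncing for the CPU and after it when syncing for
+ * the device.
+ */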
+static void
+xen_pv_iommu_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ enum dma_sync_target target)
+{
+ phys_addr_t paddr = xen_bus_to_phys(dev_addr);
+
+ BUG_ON(dir == DMA_NONE);
+
+ if (target == SYNC_FOR_CPU)
+ xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
+
+ if (is_xen_pv_iommu_buffer(dev_addr))
+ swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
+
+ if (target == SYNC_FOR_DEVICE)
+ xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
+}
+
+static void
+xen_pv_iommu_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ xen_pv_iommu_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+
+static void
+xen_pv_iommu_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ xen_pv_iommu_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+static void
+xen_pv_iommu_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(dir == DMA_NONE);
+
+ for_each_sg(sgl, sg, nelems, i)
+		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg),
+				 dir, attrs);
+}
+
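+/*
+ * Bounce each scatterlist entry individually.  On failure, tear down the
+ * entries mapped so far and return 0, as the DMA API requires.
+ */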
+static int
+xen_pv_iommu_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(dir == DMA_NONE);
+
+	for_each_sg(sgl, sg, nelems, i) {
+		phys_addr_t paddr = sg_phys(sg);
+		phys_addr_t map = swiotlb_tbl_map_single(hwdev,
+							 start_dma_addr,
+							 paddr,
+							 sg->length,
+							 dir, attrs);
+
+ if (map == SWIOTLB_MAP_ERROR) {
+ dev_warn(hwdev, "swiotlb buffer is full\n");
+			/*
+			 * Don't panic here, we expect map_sg users
+			 * to do proper error handling.
+			 */
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+ xen_pv_iommu_unmap_sg_attrs(hwdev, sgl, i, dir,
+ attrs);
+ sg_dma_len(sgl) = 0;
+ return 0;
+ }
+
+		sg->dma_address = xen_phys_to_bus(map);
+ sg_dma_len(sg) = sg->length;
+ }
+ return nelems;
+}
+
+static void
+xen_pv_iommu_sync_sg(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ enum dma_sync_target target)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i)
+ xen_pv_iommu_sync_single(hwdev, sg->dma_address,
+ sg_dma_len(sg), dir, target);
+}
+
+static void
+xen_pv_iommu_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ xen_pv_iommu_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+}
+
+static void
+xen_pv_iommu_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ xen_pv_iommu_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+}
+
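+/*
+ * A device can use these ops if its mask covers the last byte of the
+ * bounce buffer, since every streaming mapping lands inside it.
+ */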
+static int
+xen_pv_iommu_dma_supported(struct device *hwdev, u64 mask)
+{
+ return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
+}
+
+static int
+xen_pv_iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static int
+xen_pv_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs)
+{
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
+}
+
+static int xen_pv_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == XEN_PV_IOMMU_ERROR_CODE;
+}
+
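+/* Tie the helpers above into the DMA API. */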
+const struct dma_map_ops xen_pv_iommu_dma_ops = {
+ .alloc = xen_pv_iommu_alloc_coherent,
+ .free = xen_pv_iommu_free_coherent,
+ .sync_single_for_cpu = xen_pv_iommu_sync_single_for_cpu,
+ .sync_single_for_device = xen_pv_iommu_sync_single_for_device,
+ .sync_sg_for_cpu = xen_pv_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = xen_pv_iommu_sync_sg_for_device,
+ .map_sg = xen_pv_iommu_map_sg_attrs,
+ .unmap_sg = xen_pv_iommu_unmap_sg_attrs,
+ .map_page = xen_pv_iommu_map_page,
+ .unmap_page = xen_pv_iommu_unmap_page,
+ .dma_supported = xen_pv_iommu_dma_supported,
+ .mmap = xen_pv_iommu_dma_mmap,
+ .get_sgtable = xen_pv_iommu_get_sgtable,
+ .mapping_error = xen_pv_iommu_mapping_error,
+};